Upgrade from Django 0.96 to Django 1.0.2.
Risk: high (framework change)
Visibility: medium
Signed-off-by: James Ren <jamesren@google.com>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@3457 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/afe/admin.py b/frontend/afe/admin.py
new file mode 100644
index 0000000..0f89f5b
--- /dev/null
+++ b/frontend/afe/admin.py
@@ -0,0 +1,94 @@
+"""Django 1.0 admin interface declarations."""
+
+from django.contrib import admin
+
+from autotest_lib.frontend import settings
+from autotest_lib.frontend.afe import model_logic, models
+
+
+class AtomicGroupAdmin(admin.ModelAdmin):
+ list_display = ('name', 'description', 'max_number_of_machines')
+
+ def queryset(self, request):
+ return models.AtomicGroup.valid_objects
+
+admin.site.register(models.AtomicGroup, AtomicGroupAdmin)
+
+
+class LabelAdmin(admin.ModelAdmin):
+ list_display = ('name', 'kernel_config')
+
+ def queryset(self, request):
+ return models.Label.valid_objects
+
+admin.site.register(models.Label, LabelAdmin)
+
+
+class UserAdmin(admin.ModelAdmin):
+ list_display = ('login', 'access_level')
+ search_fields = ('login',)
+
+admin.site.register(models.User, UserAdmin)
+
+
+class HostAdmin(admin.ModelAdmin):
+ # TODO(showard) - showing platform requires a SQL query for
+ # each row (since labels are many-to-many) - should we remove
+ # it?
+ list_display = ('hostname', 'platform', 'locked', 'status')
+ list_filter = ('labels', 'locked', 'protection')
+ search_fields = ('hostname', 'status')
+ filter_horizontal = ('labels',)
+
+ def queryset(self, request):
+ return models.Host.valid_objects
+
+admin.site.register(models.Host, HostAdmin)
+
+
+class TestAdmin(admin.ModelAdmin):
+ fields = ('name', 'author', 'test_category', 'test_class',
+ 'test_time', 'sync_count', 'test_type', 'path',
+ 'dependencies', 'experimental', 'run_verify',
+ 'description')
+ list_display = ('name', 'test_type', 'description', 'sync_count')
+ search_fields = ('name',)
+ filter_horizontal = ('dependency_labels',)
+
+admin.site.register(models.Test, TestAdmin)
+
+
+class ProfilerAdmin(admin.ModelAdmin):
+ list_display = ('name', 'description')
+ search_fields = ('name',)
+
+admin.site.register(models.Profiler, ProfilerAdmin)
+
+
+class AclGroupAdmin(admin.ModelAdmin):
+ list_display = ('name', 'description')
+ search_fields = ('name',)
+ filter_horizontal = ('users', 'hosts')
+
+admin.site.register(models.AclGroup, AclGroupAdmin)
+
+
+if settings.FULL_ADMIN:
+ class JobAdmin(admin.ModelAdmin):
+ list_display = ('id', 'owner', 'name', 'control_type')
+ filter_horizontal = ('dependency_labels',)
+
+ admin.site.register(models.Job, JobAdmin)
+
+ class IneligibleHostQueueAdmin(admin.ModelAdmin):
+ list_display = ('id', 'job', 'host')
+
+ admin.site.register(models.IneligibleHostQueue, IneligibleHostQueueAdmin)
+
+ class HostQueueEntryAdmin(admin.ModelAdmin):
+ list_display = ('id', 'job', 'host', 'status',
+ 'meta_host')
+
+ admin.site.register(models.HostQueueEntry, HostQueueEntryAdmin)
+
+ admin.site.register(models.AbortedHostQueueEntry)
diff --git a/frontend/afe/doctests/001_rpc_test.txt b/frontend/afe/doctests/001_rpc_test.txt
index 04e5940..b464785 100644
--- a/frontend/afe/doctests/001_rpc_test.txt
+++ b/frontend/afe/doctests/001_rpc_test.txt
@@ -8,8 +8,8 @@
# 'http://hostname:8000/afe/server/noauth/rpc/')
# )
>>> if 'rpc_interface' not in globals():
-... from frontend.afe import rpc_interface, models
-... from frontend import thread_local
+... from autotest_lib.frontend.afe import rpc_interface, models
+... from autotest_lib.frontend import thread_local
... # set up a user for us to "login" as
... user = models.User(login='debug_user')
... user.access_level = 100
@@ -210,27 +210,27 @@
# check labels for hosts
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
-['label1']
+[u'label1']
>>> data = rpc_interface.get_hosts(hostname='host2')
>>> data[0]['labels']
-['label1', 'label2']
+[u'label1', u'label2']
>>> data[0]['platform']
-'label2'
+u'label2'
# check host lists for labels -- use double underscore to specify fields of
# related objects
>>> data = rpc_interface.get_hosts(labels__name='label1')
>>> [host['hostname'] for host in data]
-['host1', 'host2']
+[u'host1', u'host2']
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
-['host2']
+[u'host2']
# remove a host from a label
>>> rpc_interface.host_remove_labels('host2', ['label2'])
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
-['label1']
+[u'label1']
>>> rpc_interface.get_hosts(labels__name='label2')
[]
@@ -247,36 +247,36 @@
# check labels for hosts
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
-['label1', 'label2']
+[u'label1', u'label2']
>>> data = rpc_interface.get_hosts(hostname='host2')
>>> data[0]['labels']
-['label2']
+[u'label2']
>>> data[0]['platform']
-'label2'
+u'label2'
# check host lists for labels -- use double underscore to specify fields of
# related objects
>>> data = rpc_interface.get_hosts(labels__name='label1')
>>> [host['hostname'] for host in data]
-['host1']
+[u'host1']
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
-['host1', 'host2']
+[u'host1', u'host2']
# remove a host from a label
>>> rpc_interface.label_remove_hosts('label2', ['host2'])
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
-['label1', 'label2']
+[u'label1', u'label2']
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
-['host1']
+[u'host1']
# Remove multiple hosts from a label
>>> rpc_interface.label_add_hosts('label2', ['host2'])
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
-['host1', 'host2']
+[u'host1', u'host2']
>>> rpc_interface.label_remove_hosts('label2', ['host2', 'host1'])
>>> rpc_interface.get_hosts(labels__name='label2')
[]
@@ -288,7 +288,7 @@
# other group
>>> data = rpc_interface.get_acl_groups(hosts__hostname='host1')
>>> [acl_group['name'] for acl_group in data]
-['Everyone']
+[u'Everyone']
>>> rpc_interface.add_user(login='showard', access_level=0)
2
@@ -299,31 +299,31 @@
>>> rpc_interface.acl_group_add_hosts('my_group', ['host1'])
>>> data = rpc_interface.get_acl_groups(name='my_group')
>>> data[0]['users']
-['debug_user', 'showard']
+[u'debug_user', u'showard']
>>> data[0]['hosts']
-['host1']
+[u'host1']
>>> data = rpc_interface.get_acl_groups(users__login='showard')
>>> [acl_group['name'] for acl_group in data]
-['Everyone', 'my_group']
+[u'Everyone', u'my_group']
# note host has been automatically removed from 'Everyone'
>>> data = rpc_interface.get_acl_groups(hosts__hostname='host1')
>>> [acl_group['name'] for acl_group in data]
-['my_group']
+[u'my_group']
>>> rpc_interface.acl_group_remove_users('my_group', ['showard'])
>>> rpc_interface.acl_group_remove_hosts('my_group', ['host1'])
>>> data = rpc_interface.get_acl_groups(name='my_group')
>>> data[0]['users'], data[0]['hosts']
-(['debug_user'], [])
+([u'debug_user'], [])
>>> data = rpc_interface.get_acl_groups(users__login='showard')
>>> [acl_group['name'] for acl_group in data]
-['Everyone']
+[u'Everyone']
# note host has been automatically added back to 'Everyone'
>>> data = rpc_interface.get_acl_groups(hosts__hostname='host1')
>>> [acl_group['name'] for acl_group in data]
-['Everyone']
+[u'Everyone']
# host attributes
@@ -331,7 +331,7 @@
>>> rpc_interface.set_host_attribute('color', 'red', hostname='host1')
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['attributes']
-{'color': 'red'}
+{u'color': u'red'}
>>> rpc_interface.set_host_attribute('color', None, hostname='host1')
>>> data = rpc_interface.get_hosts(hostname='host1')
@@ -463,7 +463,7 @@
>>> data = rpc_interface.get_jobs()
>>> data = data[0]
>>> data['id'], data['owner'], data['name'], data['priority']
-(1, 'debug_user', 'my_job', 'Low')
+(1, u'debug_user', u'my_job', 'Low')
>>> data['control_file'] == cf_info['control_file']
True
>>> data['control_type']
@@ -552,13 +552,13 @@
>>> data = rpc_interface.get_jobs_summary()
>>> counts = data[0]['status_counts']
>>> counts
-{'Queued': 4}
+{u'Queued': 4}
# abort the job
>>> rpc_interface.abort_host_queue_entries(job__id=1)
>>> data = rpc_interface.get_jobs_summary(id=1)
>>> data[0]['status_counts']
-{'Aborted (Queued)': 4}
+{u'Aborted (Queued)': 4}
# Remove the two hosts in my_label
>>> rpc_interface.delete_host('my_label_host1')
@@ -572,15 +572,15 @@
# paging and a sort_by argument to specify the sort column
>>> data = rpc_interface.get_hosts(query_limit=1)
>>> [host['hostname'] for host in data]
-['host1']
+[u'host1']
>>> data = rpc_interface.get_hosts(query_start=1, query_limit=1)
>>> [host['hostname'] for host in data]
-['host2']
+[u'host2']
# sort_by = ['-hostname'] indicates sorting in descending order by hostname
>>> data = rpc_interface.get_hosts(sort_by=['-hostname'])
>>> [host['hostname'] for host in data]
-['host2', 'host1']
+[u'host2', u'host1']
# cloning a job
@@ -620,7 +620,7 @@
# get hosts ACL'd to a user
>>> hosts = rpc_interface.get_hosts(aclgroup__users__login='debug_user')
>>> sorted([host['hostname'] for host in hosts])
-['host1', 'host2']
+[u'host1', u'host2']
>>> rpc_interface.add_acl_group(name='mygroup')
3
@@ -628,22 +628,22 @@
>>> rpc_interface.acl_group_add_hosts('mygroup', ['host1'])
>>> data = rpc_interface.get_acl_groups(name='Everyone')[0]
>>> data['users'], data['hosts']
-(['debug_user', 'showard'], ['host2'])
+([u'debug_user', u'showard'], [u'host2'])
>>> data = rpc_interface.get_acl_groups(name='mygroup')[0]
>>> data['users'], data['hosts']
-(['debug_user'], ['host1'])
+([u'debug_user'], [u'host1'])
>>> hosts = rpc_interface.get_hosts(aclgroup__users__login='debug_user')
>>> sorted([host['hostname'] for host in hosts])
-['host1', 'host2']
+[u'host1', u'host2']
>>> hosts = rpc_interface.get_hosts(aclgroup__users__login='showard')
>>> [host['hostname'] for host in hosts]
-['host2']
+[u'host2']
>>> rpc_interface.delete_acl_group('mygroup')
>>> data = rpc_interface.get_acl_groups(name='Everyone')[0]
>>> sorted(data['hosts'])
-['host1', 'host2']
+[u'host1', u'host2']
# atomic groups
# #############
@@ -702,11 +702,11 @@
3
>>> hosts_in_two = rpc_interface.get_hosts(multiple_labels=['two-label'])
>>> list(sorted(h['hostname'] for h in hosts_in_two))
-['ah3-blue', 'ah4-blue', 'ahost1', 'ahost2']
+[u'ah3-blue', u'ah4-blue', u'ahost1', u'ahost2']
>>> rpc_interface.atomic_group_remove_labels(mini_rack_group_id, ['red-label'])
>>> ag_labels = rpc_interface.get_labels(atomic_group__name='mini rack')
>>> sorted(label['name'] for label in ag_labels)
-['one-label', 'two-label']
+[u'one-label', u'two-label']
>>> host_list = rpc_interface.get_hosts()
>>> hosts_by_name = {}
@@ -715,12 +715,12 @@
...
>>> hosts_by_name['host1']['atomic_group']
>>> hosts_by_name['ahost1']['atomic_group']
-'mini rack'
+u'mini rack'
>>> hosts_by_name['ah3-blue']['atomic_group']
-'mini rack'
+u'mini rack'
>>> host_list = rpc_interface.get_hosts(labels__atomic_group__name='mini rack')
>>> list(sorted(h['hostname'] for h in host_list))
-['ah3-blue', 'ah4-blue', 'ahost1', 'ahost2']
+[u'ah3-blue', u'ah4-blue', u'ahost1', u'ahost2']
@@ -797,7 +797,7 @@
... synch_count=2,
... atomic_group_name='mini rack')
Traceback (most recent call last):
-ValidationError: {'hosts': 'Hosts "host1, host2" are not in Atomic Group "mini rack"'}
+ValidationError: {'hosts': u'Hosts "host1, host2" are not in Atomic Group "mini rack"'}
# fail to create a job in an atomic group. not enough hosts due to meta_hosts.
>>> rpc_interface.create_job(name='my_atomic_job',
@@ -808,7 +808,7 @@
... synch_count=4,
... atomic_group_name='mini rack')
Traceback (most recent call last):
-ValidationError: {'atomic_group_name': 'Insufficient hosts in Atomic Group "mini rack" with the supplied dependencies and meta_hosts.'}
+ValidationError: {'atomic_group_name': u'Insufficient hosts in Atomic Group "mini rack" with the supplied dependencies and meta_hosts.'}
# fail to create a job in an atomic group. not enough hosts.
>>> rpc_interface.create_job(name='my_atomic_job',
@@ -818,7 +818,7 @@
... synch_count=5,
... atomic_group_name='mini rack')
Traceback (most recent call last):
-ValidationError: {'atomic_group_name': 'Insufficient hosts in Atomic Group "mini rack" with the supplied dependencies and meta_hosts.'}
+ValidationError: {'atomic_group_name': u'Insufficient hosts in Atomic Group "mini rack" with the supplied dependencies and meta_hosts.'}
# fail to create a job in an atomic group. Atomic Group not found.
>>> rpc_interface.create_job(name='my_atomic_job',
@@ -851,7 +851,7 @@
... control_type='Client',
... hosts=['ahost1', 'ahost2'])
Traceback (most recent call last):
-ValidationError: {'hosts': 'Host(s) "ahost1, ahost2" are atomic group hosts but no atomic group was specified for this job.'}
+ValidationError: {'hosts': u'Host(s) "ahost1, ahost2" are atomic group hosts but no atomic group was specified for this job.'}
# Create a job using a label in an atomic group as the meta-host but forget
# to specify the group. The frontend should figure this out for us.
@@ -875,7 +875,7 @@
... meta_hosts=['blue-label'],
... dependencies=['two-label'])
Traceback (most recent call last):
-ValidationError: {'atomic_group_name': "Dependency 'two-label' requires an atomic group but no atomic_group_name or meta_host in an atomic group was specified for this job."}
+ValidationError: {'atomic_group_name': "Dependency u'two-label' requires an atomic group but no atomic_group_name or meta_host in an atomic group was specified for this job."}
>>> invisible_group_id = rpc_interface.add_atomic_group(
... name='invisible rack',
@@ -890,7 +890,7 @@
... meta_hosts=['two-label'],
... atomic_group_name='invisible rack')
Traceback (most recent call last):
-ValidationError: {'atomic_group_name': "meta_hosts or dependency 'two-label' requires atomic group 'mini rack' instead of the supplied atomic_group_name='invisible rack'."}
+ValidationError: {'atomic_group_name': "meta_hosts or dependency u'two-label' requires atomic group u'mini rack' instead of the supplied atomic_group_name=u'invisible rack'."}
# we're done testing atomic groups, clean up
>>> rpc_interface.delete_atomic_group(invisible_group_id)
diff --git a/frontend/afe/doctests/003_misc_rpc_features.txt b/frontend/afe/doctests/003_misc_rpc_features.txt
index 3b2a19c..57f580f 100644
--- a/frontend/afe/doctests/003_misc_rpc_features.txt
+++ b/frontend/afe/doctests/003_misc_rpc_features.txt
@@ -2,7 +2,7 @@
# that would clutter the main rpc_test
# setup
->>> from frontend.afe import rpc_interface
+>>> from autotest_lib.frontend.afe import rpc_interface
>>> rpc_interface.add_profiler(name='oprofile')
1
@@ -14,7 +14,7 @@
... tests=['sleeptest'],
... profilers=['oprofile', 'iostat'])
>>> cf_info['control_file']
-"def step_init():\n job.next_step('step0')\n job.next_step('step1')\n job.next_step('step2')\n job.next_step('step3')\n job.next_step('step4')\n\ndef step0():\n job.profilers.add('oprofile')\n\ndef step1():\n job.profilers.add('iostat')\n\ndef step2():\n job.run_test('testname')\n\ndef step3():\n job.profilers.delete('oprofile')\n\ndef step4():\n job.profilers.delete('iostat')"
+u"def step_init():\n job.next_step('step0')\n job.next_step('step1')\n job.next_step('step2')\n job.next_step('step3')\n job.next_step('step4')\n\ndef step0():\n job.profilers.add('oprofile')\n\ndef step1():\n job.profilers.add('iostat')\n\ndef step2():\n job.run_test('testname')\n\ndef step3():\n job.profilers.delete('oprofile')\n\ndef step4():\n job.profilers.delete('iostat')"
# server-side control file generation
>>> rpc_interface.modify_test('sleeptest', test_type='Server')
diff --git a/frontend/afe/feeds/feed.py b/frontend/afe/feeds/feed.py
index d34a0b9..7479446 100644
--- a/frontend/afe/feeds/feed.py
+++ b/frontend/afe/feeds/feed.py
@@ -1,6 +1,6 @@
import django.http
from django.contrib.syndication import feeds
-from frontend.afe import models
+from autotest_lib.frontend.afe import models
# Copied from django/contrib/syndication/views.py. The default view doesn't
diff --git a/frontend/afe/json_rpc/proxy.py b/frontend/afe/json_rpc/proxy.py
index d0773c8..c39fd92 100644
--- a/frontend/afe/json_rpc/proxy.py
+++ b/frontend/afe/json_rpc/proxy.py
@@ -20,7 +20,7 @@
"""
import urllib2
-from frontend.afe.simplejson import decoder, encoder
+from autotest_lib.frontend.afe.simplejson import decoder, encoder
json_encoder = encoder.JSONEncoder()
json_decoder = decoder.JSONDecoder()
diff --git a/frontend/afe/json_rpc/serviceHandler.py b/frontend/afe/json_rpc/serviceHandler.py
index 7a80529..3936b3a 100644
--- a/frontend/afe/json_rpc/serviceHandler.py
+++ b/frontend/afe/json_rpc/serviceHandler.py
@@ -21,7 +21,7 @@
import traceback
-from frontend.afe.simplejson import decoder, encoder
+from autotest_lib.frontend.afe.simplejson import decoder, encoder
def customConvertJson(value):
"""\
diff --git a/frontend/afe/management.py b/frontend/afe/management.py
index 57e0855..4f6066c 100644
--- a/frontend/afe/management.py
+++ b/frontend/afe/management.py
@@ -1,11 +1,10 @@
# use some undocumented Django tricks to execute custom logic after syncdb
-from django.dispatch import dispatcher
from django.db.models import signals
from django.contrib import auth
# In this file, it is critical that we import models *just like this*. In
# particular, we *cannot* do import common; from autotest_lib... import models.
-# This is becasue when we pass the models module to dispatcher.connect(), it
+# This is because when we pass the models module to signals.post_syncdb.connect(), it
# calls id() on the module, and the id() of a module can differ depending on how
# it was imported. For that reason, we must import models as Django does -- not
# through the autotest_lib magic set up through common.py. If you do that, the
@@ -43,5 +42,4 @@
print 'Group "%s" already exists' % BASIC_ADMIN
-dispatcher.connect(create_admin_group, sender=models,
- signal=signals.post_syncdb)
+signals.post_syncdb.connect(create_admin_group, sender=models)
diff --git a/frontend/afe/model_logic.py b/frontend/afe/model_logic.py
index c94233b..f8dc06d 100644
--- a/frontend/afe/model_logic.py
+++ b/frontend/afe/model_logic.py
@@ -2,13 +2,17 @@
Extensions to Django's model logic.
"""
+import re
+import django.core.exceptions
from django.db import models as dbmodels, backend, connection
+from django.db.models.sql import query
from django.utils import datastructures
from autotest_lib.frontend.afe import readonly_connection
+
class ValidationError(Exception):
"""\
- Data validation error in adding or updating an object. The associated
+ Data validation error in adding or updating an object. The associated
value is a dictionary mapping field names to error strings.
"""
@@ -24,6 +28,11 @@
return wrapper_method
+def _quote_name(name):
+ """Shorthand for connection.ops.quote_name()."""
+ return connection.ops.quote_name(name)
+
+
def _wrap_generator_with_readonly(generator):
"""
We have to wrap generators specially. Assume it performs
@@ -64,18 +73,19 @@
QuerySet object that performs all database queries with the read-only
connection.
"""
- def __init__(self, model=None):
- super(ReadonlyQuerySet, self).__init__(model)
+ def __init__(self, model=None, *args, **kwargs):
+ super(ReadonlyQuerySet, self).__init__(model, *args, **kwargs)
_make_queryset_readonly(self)
def values(self, *fields):
- return self._clone(klass=ReadonlyValuesQuerySet, _fields=fields)
+ return self._clone(klass=ReadonlyValuesQuerySet,
+ setup=True, _fields=fields)
class ReadonlyValuesQuerySet(dbmodels.query.ValuesQuerySet):
- def __init__(self, model=None):
- super(ReadonlyValuesQuerySet, self).__init__(model)
+ def __init__(self, model=None, *args, **kwargs):
+ super(ReadonlyValuesQuerySet, self).__init__(model, *args, **kwargs)
_make_queryset_readonly(self)
@@ -84,57 +94,33 @@
Extended manager supporting subquery filtering.
"""
- class _CustomJoinQ(dbmodels.Q):
- """
- Django "Q" object supporting a custom suffix for join aliases.See
- filter_custom_join() for why this can be useful.
- """
+ class _CustomQuery(query.Query):
+ def clone(self, klass=None, **kwargs):
+ obj = super(ExtendedManager._CustomQuery, self).clone(
+ klass, _customSqlQ=self._customSqlQ)
- def __init__(self, join_suffix, **kwargs):
- super(ExtendedManager._CustomJoinQ, self).__init__(**kwargs)
- self._join_suffix = join_suffix
+ customQ = kwargs.get('_customSqlQ', None)
+ if customQ is not None:
+ obj._customSqlQ._joins.update(customQ._joins)
+ obj._customSqlQ._where.extend(customQ._where)
+ obj._customSqlQ._params.extend(customQ._params)
+ return obj
- @staticmethod
- def _substitute_aliases(renamed_aliases, condition):
- for old_alias, new_alias in renamed_aliases:
- condition = condition.replace(backend.quote_name(old_alias),
- backend.quote_name(new_alias))
- return condition
+ def get_from_clause(self):
+ from_, params = super(
+ ExtendedManager._CustomQuery, self).get_from_clause()
+ join_clause = ''
+ for join_alias, join in self._customSqlQ._joins.iteritems():
+ join_table, join_type, condition = join
+ join_clause += ' %s %s AS %s ON (%s)' % (
+ join_type, join_table, join_alias, condition)
- @staticmethod
- def _unquote_name(name):
- 'This may be MySQL specific'
- if backend.quote_name(name) == name:
- return name[1:-1]
- return name
+ if join_clause:
+ from_.append(join_clause)
-
- def get_sql(self, opts):
- joins, where, params = (
- super(ExtendedManager._CustomJoinQ, self).get_sql(opts))
-
- new_joins = datastructures.SortedDict()
-
- # rename all join aliases and correct references in later joins
- renamed_tables = []
- # using iteritems seems to mess up the ordering here
- for alias, (table, join_type, condition) in joins.items():
- alias = self._unquote_name(alias)
- new_alias = alias + self._join_suffix
- renamed_tables.append((alias, new_alias))
- condition = self._substitute_aliases(renamed_tables, condition)
- new_alias = backend.quote_name(new_alias)
- new_joins[new_alias] = (table, join_type, condition)
-
- # correct references in where
- new_where = []
- for clause in where:
- new_where.append(
- self._substitute_aliases(renamed_tables, clause))
-
- return new_joins, new_where, params
+ return from_, params
class _CustomSqlQ(dbmodels.Q):
@@ -154,8 +140,22 @@
self._params.extend(params)
- def get_sql(self, opts):
- return self._joins, self._where, self._params
+ def add_to_query(self, query, aliases):
+ if self._where:
+ where = ' AND '.join(self._where)
+ query.add_extra(None, None, (where,), self._params, None, None)
+
+
+ def _add_customSqlQ(self, query_set, filter_object):
+ """\
+ Add a _CustomSqlQ to the query set.
+ """
+ # Make a copy of the query set
+ query_set = query_set.all()
+
+ query_set.query = query_set.query.clone(
+ ExtendedManager._CustomQuery, _customSqlQ=filter_object)
+ return query_set.filter(filter_object)
def add_join(self, query_set, join_table, join_key,
@@ -168,8 +168,8 @@
@param join_condition extra condition for the ON clause of the join
@param suffix suffix to add to join_table for the join alias
@param exclude if true, exclude rows that match this join (will use a
- LEFT JOIN and an appropriate WHERE condition)
- @param force_left_join - if true, a LEFT JOIN will be used instead of an
+ LEFT OUTER JOIN and an appropriate WHERE condition)
+ @param force_left_join if true, a LEFT OUTER JOIN will be used instead of an
INNER JOIN regardless of other options
"""
join_from_table = self.model._meta.db_table
@@ -181,9 +181,9 @@
if join_condition:
full_join_condition += ' AND (' + join_condition + ')'
if exclude or force_left_join:
- join_type = 'LEFT JOIN'
+ join_type = query_set.query.LOUTER
else:
- join_type = 'INNER JOIN'
+ join_type = query_set.query.INNER
filter_object = self._CustomSqlQ()
filter_object.add_join(join_table,
@@ -192,22 +192,13 @@
alias=join_alias)
if exclude:
filter_object.add_where(full_join_key + ' IS NULL')
- return query_set.filter(filter_object).distinct()
-
- def filter_custom_join(self, join_suffix, **kwargs):
- """
- Just like Django filter(), but allows the user to specify a custom
- suffix for the join aliases involves in the filter. This makes it
- possible to join against a table multiple times (as long as a different
- suffix is used each time), which is necessary for certain queries.
- """
- filter_object = self._CustomJoinQ(join_suffix, **kwargs)
- return self.complex_filter(filter_object)
+ query_set = self._add_customSqlQ(query_set, filter_object)
+ return query_set.distinct()
def _get_quoted_field(self, table, field):
- return (backend.quote_name(table) + '.' + backend.quote_name(field))
+ return _quote_name(table) + '.' + _quote_name(field)
def get_key_on_this_table(self, key_field=None):
@@ -222,12 +213,15 @@
def _custom_select_query(self, query_set, selects):
- query_selects, where, params = query_set._get_sql_clause()
- if query_set._distinct:
+ sql, params = query_set.query.as_sql()
+ from_ = sql[sql.find(' FROM'):]
+
+ if query_set.query.distinct:
distinct = 'DISTINCT '
else:
distinct = ''
- sql_query = 'SELECT ' + distinct + ','.join(selects) + where
+
+ sql_query = ('SELECT ' + distinct + ','.join(selects) + from_)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql_query, params)
return cursor.fetchall()
@@ -549,8 +543,36 @@
return errors
+ def _validate(self):
+ """
+ First coerces all fields on this instance to their proper Python types.
+ Then runs validation on every field. Returns a dictionary of
+ field_name -> error_list.
+
+ Based on validate() from django.db.models.Model in Django 0.96, which
+ was removed in Django 1.0. It should reappear in a later version. See:
+ http://code.djangoproject.com/ticket/6845
+ """
+ error_dict = {}
+ for f in self._meta.fields:
+ try:
+ python_value = f.to_python(
+ getattr(self, f.attname, f.get_default()))
+ except django.core.exceptions.ValidationError, e:
+ error_dict[f.name] = str(e.message)
+ continue
+
+ if not f.blank and not python_value:
+ error_dict[f.name] = 'This field is required.'
+ continue
+
+ setattr(self, f.attname, python_value)
+
+ return error_dict
+
+
def do_validate(self):
- errors = self.validate()
+ errors = self._validate()
unique_errors = self.validate_unique()
for field_name, error in unique_errors.iteritems():
errors.setdefault(field_name, error)
@@ -600,7 +622,7 @@
field name changes the sort to descending order.
-extra_args: keyword args to pass to query.extra() (see Django
DB layer documentation)
- -extra_where: extra WHERE clause to append
+ -extra_where: extra WHERE clause to append
"""
filter_data = dict(filter_data) # copy so we don't mutate the original
query_start = filter_data.pop('query_start', None)
@@ -774,7 +796,7 @@
field.
"""
- def save(self):
+ def save(self, *args, **kwargs):
first_time = (self.id is None)
if first_time:
# see if this object was previously added and invalidated
@@ -787,7 +809,7 @@
# no existing object
pass
- super(ModelWithInvalid, self).save()
+ super(ModelWithInvalid, self).save(*args, **kwargs)
def clean_object(self):
diff --git a/frontend/afe/models.py b/frontend/afe/models.py
index 51ee712..d79d382 100644
--- a/frontend/afe/models.py
+++ b/frontend/afe/models.py
@@ -35,7 +35,7 @@
Optional:
description: Arbitrary text description of this group's purpose.
"""
- name = dbmodels.CharField(maxlength=255, unique=True)
+ name = dbmodels.CharField(max_length=255, unique=True)
description = dbmodels.TextField(blank=True)
# This magic value is the default to simplify the scheduler logic.
# It must be "large". The common use of atomic groups is to want all
@@ -44,7 +44,7 @@
INFINITE_MACHINES = 333333333
max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
invalid = dbmodels.BooleanField(default=False,
- editable=settings.FULL_ADMIN)
+ editable=settings.FULL_ADMIN)
name_field = 'name'
objects = model_logic.ExtendedManager()
@@ -65,13 +65,9 @@
class Meta:
db_table = 'atomic_groups'
- class Admin:
- list_display = ('name', 'description', 'max_number_of_machines')
- # see Host.Admin
- manager = model_logic.ValidObjectsManager()
- def __str__(self):
- return self.name
+ def __unicode__(self):
+ return unicode(self.name)
class Label(model_logic.ModelWithInvalid, dbmodels.Model):
@@ -87,8 +83,8 @@
in the job_dependencies).
atomic_group: The atomic group associated with this label.
"""
- name = dbmodels.CharField(maxlength=255, unique=True)
- kernel_config = dbmodels.CharField(maxlength=255, blank=True)
+ name = dbmodels.CharField(max_length=255, unique=True)
+ kernel_config = dbmodels.CharField(max_length=255, blank=True)
platform = dbmodels.BooleanField(default=False)
invalid = dbmodels.BooleanField(default=False,
editable=settings.FULL_ADMIN)
@@ -116,13 +112,8 @@
class Meta:
db_table = 'labels'
- class Admin:
- list_display = ('name', 'kernel_config')
- # see Host.Admin
- manager = model_logic.ValidObjectsManager()
-
- def __str__(self):
- return self.name
+ def __unicode__(self):
+ return unicode(self.name)
class User(dbmodels.Model, model_logic.ModelExtensions):
@@ -137,7 +128,7 @@
ACCESS_ADMIN = 1
ACCESS_USER = 0
- login = dbmodels.CharField(maxlength=255, unique=True)
+ login = dbmodels.CharField(max_length=255, unique=True)
access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)
# user preferences
@@ -153,13 +144,13 @@
objects = model_logic.ExtendedManager()
- def save(self):
+ def save(self, *args, **kwargs):
# is this a new object being saved for the first time?
first_time = (self.id is None)
user = thread_local.get_user()
if user and not user.is_superuser() and user.login != self.login:
raise AclAccessViolation("You cannot modify user " + self.login)
- super(User, self).save()
+ super(User, self).save(*args, **kwargs)
if first_time:
everyone = AclGroup.objects.get(name='Everyone')
everyone.users.add(self)
@@ -172,12 +163,8 @@
class Meta:
db_table = 'users'
- class Admin:
- list_display = ('login', 'access_level')
- search_fields = ('login',)
-
- def __str__(self):
- return self.login
+ def __unicode__(self):
+ return unicode(self.login)
class Host(model_logic.ModelWithInvalid, dbmodels.Model,
@@ -202,13 +189,12 @@
'Repair Failed', 'Dead', 'Cleaning', 'Pending',
string_values=True)
- hostname = dbmodels.CharField(maxlength=255, unique=True)
- labels = dbmodels.ManyToManyField(Label, blank=True,
- filter_interface=dbmodels.HORIZONTAL)
+ hostname = dbmodels.CharField(max_length=255, unique=True)
+ labels = dbmodels.ManyToManyField(Label, blank=True)
locked = dbmodels.BooleanField(default=False)
synch_id = dbmodels.IntegerField(blank=True, null=True,
editable=settings.FULL_ADMIN)
- status = dbmodels.CharField(maxlength=255, default=Status.READY,
+ status = dbmodels.CharField(max_length=255, default=Status.READY,
choices=Status.choices(),
editable=settings.FULL_ADMIN)
invalid = dbmodels.BooleanField(default=False,
@@ -257,7 +243,7 @@
self.labels.clear()
- def save(self):
+ def save(self, *args, **kwargs):
# extra spaces in the hostname can be a sneaky source of errors
self.hostname = self.hostname.strip()
# is this a new object being saved for the first time?
@@ -271,7 +257,7 @@
elif not self.locked and self.locked_by:
self.locked_by = None
self.lock_time = None
- super(Host, self).save()
+ super(Host, self).save(*args, **kwargs)
if first_time:
everyone = AclGroup.objects.get(name='Everyone')
everyone.hosts.add(self)
@@ -351,26 +337,15 @@
class Meta:
db_table = 'hosts'
- class Admin:
- # TODO(showard) - showing platform requires a SQL query for
- # each row (since labels are many-to-many) - should we remove
- # it?
- list_display = ('hostname', 'platform', 'locked', 'status')
- list_filter = ('labels', 'locked', 'protection')
- search_fields = ('hostname', 'status')
- # undocumented Django feature - if you set manager here, the
- # admin code will use it, otherwise it'll use a default Manager
- manager = model_logic.ValidObjectsManager()
-
- def __str__(self):
- return self.hostname
+ def __unicode__(self):
+ return unicode(self.hostname)
class HostAttribute(dbmodels.Model):
"""Arbitrary keyvals associated with hosts."""
host = dbmodels.ForeignKey(Host)
- attribute = dbmodels.CharField(maxlength=90)
- value = dbmodels.CharField(maxlength=300)
+ attribute = dbmodels.CharField(max_length=90)
+ value = dbmodels.CharField(max_length=300)
objects = model_logic.ExtendedManager()
@@ -405,11 +380,11 @@
# now they use opposite values)
Types = enum.Enum('Client', 'Server', start_value=1)
- name = dbmodels.CharField(maxlength=255, unique=True)
- author = dbmodels.CharField(maxlength=255)
- test_class = dbmodels.CharField(maxlength=255)
- test_category = dbmodels.CharField(maxlength=255)
- dependencies = dbmodels.CharField(maxlength=255, blank=True)
+ name = dbmodels.CharField(max_length=255, unique=True)
+ author = dbmodels.CharField(max_length=255)
+ test_class = dbmodels.CharField(max_length=255)
+ test_category = dbmodels.CharField(max_length=255)
+ dependencies = dbmodels.CharField(max_length=255, blank=True)
description = dbmodels.TextField(blank=True)
experimental = dbmodels.BooleanField(default=True)
run_verify = dbmodels.BooleanField(default=True)
@@ -417,9 +392,8 @@
default=TestTime.MEDIUM)
test_type = dbmodels.SmallIntegerField(choices=Types.choices())
sync_count = dbmodels.IntegerField(default=1)
- path = dbmodels.CharField(maxlength=255, unique=True)
- dependency_labels = dbmodels.ManyToManyField(
- Label, blank=True, filter_interface=dbmodels.HORIZONTAL)
+ path = dbmodels.CharField(max_length=255, unique=True)
+ dependency_labels = dbmodels.ManyToManyField(Label, blank=True)
name_field = 'name'
objects = model_logic.ExtendedManager()
@@ -428,19 +402,8 @@
class Meta:
db_table = 'autotests'
- class Admin:
- fields = (
- (None, {'fields' :
- ('name', 'author', 'test_category', 'test_class',
- 'test_time', 'sync_count', 'test_type', 'path',
- 'dependencies', 'experimental', 'run_verify',
- 'description')}),
- )
- list_display = ('name', 'test_type', 'description', 'sync_count')
- search_fields = ('name',)
-
- def __str__(self):
- return self.name
+ def __unicode__(self):
+ return unicode(self.name)
class Profiler(dbmodels.Model, model_logic.ModelExtensions):
@@ -452,7 +415,7 @@
Optional:
description: arbirary text description
"""
- name = dbmodels.CharField(maxlength=255, unique=True)
+ name = dbmodels.CharField(max_length=255, unique=True)
description = dbmodels.TextField(blank=True)
name_field = 'name'
@@ -462,12 +425,8 @@
class Meta:
db_table = 'profilers'
- class Admin:
- list_display = ('name', 'description')
- search_fields = ('name',)
-
- def __str__(self):
- return self.name
+ def __unicode__(self):
+ return unicode(self.name)
class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
@@ -478,12 +437,10 @@
Optional:
description: arbitrary description of group
"""
- name = dbmodels.CharField(maxlength=255, unique=True)
- description = dbmodels.CharField(maxlength=255, blank=True)
- users = dbmodels.ManyToManyField(User, blank=True,
- filter_interface=dbmodels.HORIZONTAL)
- hosts = dbmodels.ManyToManyField(Host,
- filter_interface=dbmodels.HORIZONTAL)
+ name = dbmodels.CharField(max_length=255, unique=True)
+ description = dbmodels.CharField(max_length=255, blank=True)
+ users = dbmodels.ManyToManyField(User, blank=True)
+ hosts = dbmodels.ManyToManyField(Host)
name_field = 'name'
objects = model_logic.ExtendedManager()
@@ -519,7 +476,9 @@
return
not_owned = queue_entries.exclude(job__owner=user.login)
# I do this using ID sets instead of just Django filters because
- # filtering on M2M fields is broken in Django 0.96. It's better in 1.0.
+ # filtering on M2M fields is broken in Django 0.96. It's better in
+ # 1.0.
+ # TODO: Use Django filters, now that we're using 1.0.
accessible_ids = set(
entry.id for entry
in not_owned.filter(host__aclgroup__users__login=user.login))
@@ -559,10 +518,13 @@
# find hosts in both Everyone and another ACL group, and remove them
# from Everyone
- hosts_in_everyone = Host.valid_objects.filter_custom_join(
- '_everyone', aclgroup__name='Everyone')
- acled_hosts = hosts_in_everyone.exclude(aclgroup__name='Everyone')
- everyone.hosts.remove(*acled_hosts.distinct())
+ hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
+ acled_hosts = set()
+ for host in hosts_in_everyone:
+ # Has an ACL group other than Everyone
+ if host.aclgroup_set.count() > 1:
+ acled_hosts.add(host)
+ everyone.hosts.remove(*acled_hosts)
def delete(self):
@@ -586,14 +548,15 @@
Custom manipulator to get notification when ACLs are changed through
the admin interface.
"""
- def save(self, new_data):
+ def save(self, new_data, *args, **kwargs):
user = thread_local.get_user()
if hasattr(self, 'original_object'):
if (not user.is_superuser()
and self.original_object.name == 'Everyone'):
raise AclAccessViolation("You cannot modify 'Everyone'!")
self.original_object.check_for_acl_violation_acl_group()
- obj = super(AclGroup.Manipulator, self).save(new_data)
+ obj = super(AclGroup.Manipulator, self).save(new_data,
+ *args, **kwargs)
if not hasattr(self, 'original_object'):
obj.users.add(thread_local.get_user())
obj.add_current_user_if_empty()
@@ -603,12 +566,8 @@
class Meta:
db_table = 'acl_groups'
- class Admin:
- list_display = ('name', 'description')
- search_fields = ('name',)
-
- def __str__(self):
- return self.name
+ def __unicode__(self):
+ return unicode(self.name)
class JobManager(model_logic.ExtendedManager):
@@ -671,8 +630,8 @@
Priority = enum.Enum('Low', 'Medium', 'High', 'Urgent')
ControlType = enum.Enum('Server', 'Client', start_value=1)
- owner = dbmodels.CharField(maxlength=255)
- name = dbmodels.CharField(maxlength=255)
+ owner = dbmodels.CharField(max_length=255)
+ name = dbmodels.CharField(max_length=255)
priority = dbmodels.SmallIntegerField(choices=Priority.choices(),
blank=True, # to allow 0
default=Priority.MEDIUM)
@@ -684,9 +643,8 @@
synch_count = dbmodels.IntegerField(null=True, default=1)
timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
run_verify = dbmodels.BooleanField(default=True)
- email_list = dbmodels.CharField(maxlength=250, blank=True)
- dependency_labels = dbmodels.ManyToManyField(
- Label, blank=True, filter_interface=dbmodels.HORIZONTAL)
+ email_list = dbmodels.CharField(max_length=250, blank=True)
+ dependency_labels = dbmodels.ManyToManyField(Label, blank=True)
reboot_before = dbmodels.SmallIntegerField(choices=RebootBefore.choices(),
blank=True,
default=DEFAULT_REBOOT_BEFORE)
@@ -768,12 +726,8 @@
class Meta:
db_table = 'jobs'
- if settings.FULL_ADMIN:
- class Admin:
- list_display = ('id', 'owner', 'name', 'control_type')
-
- def __str__(self):
- return '%s (%s-%s)' % (self.name, self.id, self.owner)
+ def __unicode__(self):
+ return u'%s (%s-%s)' % (self.name, self.id, self.owner)
class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
@@ -785,10 +739,6 @@
class Meta:
db_table = 'ineligible_host_queues'
- if settings.FULL_ADMIN:
- class Admin:
- list_display = ('id', 'job', 'host')
-
class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
Status = enum.Enum('Queued', 'Starting', 'Verifying', 'Pending', 'Running',
@@ -801,13 +751,14 @@
job = dbmodels.ForeignKey(Job)
host = dbmodels.ForeignKey(Host, blank=True, null=True)
- status = dbmodels.CharField(maxlength=255)
+ status = dbmodels.CharField(max_length=255)
meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
db_column='meta_host')
active = dbmodels.BooleanField(default=False)
complete = dbmodels.BooleanField(default=False)
deleted = dbmodels.BooleanField(default=False)
- execution_subdir = dbmodels.CharField(maxlength=255, blank=True, default='')
+ execution_subdir = dbmodels.CharField(max_length=255, blank=True,
+ default='')
# If atomic_group is set, this is a virtual HostQueueEntry that will
# be expanded into many actual hosts within the group at schedule time.
atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
@@ -834,9 +785,9 @@
atomic_group=atomic_group, status=status)
- def save(self):
+ def save(self, *args, **kwargs):
self._set_active_and_complete()
- super(HostQueueEntry, self).save()
+ super(HostQueueEntry, self).save(*args, **kwargs)
self._check_for_updated_attributes()
@@ -912,17 +863,12 @@
db_table = 'host_queue_entries'
- if settings.FULL_ADMIN:
- class Admin:
- list_display = ('id', 'job', 'host', 'status',
- 'meta_host')
-
- def __str__(self):
+ def __unicode__(self):
hostname = None
if self.host:
hostname = self.host.hostname
- return "%s/%d (%d)" % (hostname, self.job.id, self.id)
+ return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
@@ -933,9 +879,9 @@
objects = model_logic.ExtendedManager()
- def save(self):
+ def save(self, *args, **kwargs):
self.aborted_on = datetime.now()
- super(AbortedHostQueueEntry, self).save()
+ super(AbortedHostQueueEntry, self).save(*args, **kwargs)
class Meta:
db_table = 'aborted_host_queue_entries'
@@ -962,8 +908,8 @@
class Meta:
db_table = 'recurring_run'
- def __str__(self):
- return 'RecurringRun(job %s, start %s, period %s, count %s)' % (
+ def __unicode__(self):
+ return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
self.job.id, self.start_date, self.loop_period, self.loop_count)
@@ -984,7 +930,7 @@
Task = enum.Enum('Verify', 'Cleanup', 'Repair', string_values=True)
host = dbmodels.ForeignKey(Host, blank=False, null=False)
- task = dbmodels.CharField(maxlength=64, choices=Task.choices(),
+ task = dbmodels.CharField(max_length=64, choices=Task.choices(),
blank=False, null=False)
time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
null=False)
@@ -1049,8 +995,15 @@
if not hasattr(agent, 'TASK_TYPE'):
raise ValueError("Can only prepare special tasks for "
"verify, cleanup, or repair")
- task = cls.objects.create(host=agent.host, task=agent.TASK_TYPE,
- queue_entry=agent.queue_entry)
+
+ host = Host.objects.get(id=agent.host.id)
+ queue_entry = None
+ if agent.queue_entry:
+ queue_entry = (
+ HostQueueEntry.objects.get(id=agent.queue_entry.id))
+
+ task = cls.objects.create(host=host, task=agent.TASK_TYPE,
+ queue_entry=queue_entry)
return task
@@ -1078,12 +1031,12 @@
class Meta:
db_table = 'special_tasks'
- def __str__(self):
- result = 'Special Task %s (host %s, task %s, time %s)' % (
+ def __unicode__(self):
+ result = u'Special Task %s (host %s, task %s, time %s)' % (
self.id, self.host, self.task, self.time_requested)
if self.is_complete:
- result += ' (completed)'
+ result += u' (completed)'
elif self.is_active:
- result += ' (active)'
+ result += u' (active)'
return result
diff --git a/frontend/afe/models_test.py b/frontend/afe/models_test.py
index e151df0..df9e964 100644
--- a/frontend/afe/models_test.py
+++ b/frontend/afe/models_test.py
@@ -6,6 +6,38 @@
from autotest_lib.frontend.afe import frontend_test_utils
from autotest_lib.frontend.afe import models
+
+class AclGroupTest(unittest.TestCase,
+ frontend_test_utils.FrontendTestMixin):
+ def setUp(self):
+ self._frontend_common_setup()
+
+
+ def tearDown(self):
+ self._frontend_common_teardown()
+
+
+ def _check_acls(self, host, acl_name_list):
+ actual_acl_names = [acl_group.name for acl_group
+ in host.aclgroup_set.all()]
+ self.assertEquals(set(actual_acl_names), set(acl_name_list))
+
+
+ def test_on_host_membership_change(self):
+ host1, host2 = self.hosts[1:3]
+ everyone_acl = models.AclGroup.objects.get(name='Everyone')
+
+ host1.aclgroup_set.clear()
+ self._check_acls(host1, [])
+ host2.aclgroup_set.add(everyone_acl)
+ self._check_acls(host2, ['Everyone', 'my_acl'])
+
+ models.AclGroup.on_host_membership_change()
+
+ self._check_acls(host1, ['Everyone'])
+ self._check_acls(host2, ['my_acl'])
+
+
class SpecialTaskUnittest(unittest.TestCase,
frontend_test_utils.FrontendTestMixin):
def setUp(self):
diff --git a/frontend/afe/readonly_connection.py b/frontend/afe/readonly_connection.py
index 31af595..7d59aae 100644
--- a/frontend/afe/readonly_connection.py
+++ b/frontend/afe/readonly_connection.py
@@ -1,6 +1,5 @@
from django.db import connection as django_connection
from django.conf import settings
-from django.dispatch import dispatcher
from django.core import signals
class ReadOnlyConnection(object):
@@ -129,6 +128,6 @@
# close any open connection when request finishes
-def _close_connection():
+def _close_connection(**unused_kwargs):
connection().close()
-dispatcher.connect(_close_connection, signal=signals.request_finished)
+signals.request_finished.connect(_close_connection)
diff --git a/frontend/afe/rpc_handler.py b/frontend/afe/rpc_handler.py
index 56202bc..0ed01c8 100644
--- a/frontend/afe/rpc_handler.py
+++ b/frontend/afe/rpc_handler.py
@@ -6,8 +6,8 @@
__author__ = 'showard@google.com (Steve Howard)'
import traceback, pydoc, re, urllib
-from frontend.afe.json_rpc import serviceHandler
-from frontend.afe import rpc_utils
+from autotest_lib.frontend.afe.json_rpc import serviceHandler
+from autotest_lib.frontend.afe import rpc_utils
class RpcMethodHolder(object):
diff --git a/frontend/afe/rpc_interface_unittest.py b/frontend/afe/rpc_interface_unittest.py
index 2f13e62..b298029 100644
--- a/frontend/afe/rpc_interface_unittest.py
+++ b/frontend/afe/rpc_interface_unittest.py
@@ -22,6 +22,19 @@
self._frontend_common_teardown()
+ def test_validation(self):
+ # non-number for a numeric field
+ self.assertRaises(model_logic.ValidationError,
+ rpc_interface.add_atomic_group, name='foo',
+ max_number_of_machines='bar')
+ # omit a required field
+ self.assertRaises(model_logic.ValidationError, rpc_interface.add_label,
+ name=None)
+ # violate uniqueness constraint
+ self.assertRaises(model_logic.ValidationError, rpc_interface.add_host,
+ hostname='host1')
+
+
def test_multiple_platforms(self):
platform2 = models.Label.objects.create(name='platform2', platform=True)
self.assertRaises(model_logic.ValidationError,
@@ -36,6 +49,33 @@
self.assertEquals(platforms[0]['name'], 'myplatform')
+ def _check_hostnames(self, hosts, expected_hostnames):
+ self.assertEquals(set(host['hostname'] for host in hosts),
+ set(expected_hostnames))
+
+
+ def test_get_hosts(self):
+ hosts = rpc_interface.get_hosts()
+ self._check_hostnames(hosts, [host.hostname for host in self.hosts])
+
+ hosts = rpc_interface.get_hosts(hostname='host1')
+ self._check_hostnames(hosts, ['host1'])
+
+
+ def test_get_hosts_multiple_labels(self):
+ hosts = rpc_interface.get_hosts(
+ multiple_labels=['myplatform', 'label1'])
+ self._check_hostnames(hosts, ['host1'])
+
+
+ def test_get_hosts_exclude_only_if_needed(self):
+ self.hosts[0].labels.add(self.label3)
+
+ hosts = rpc_interface.get_hosts(hostname__in=['host1', 'host2'],
+ exclude_only_if_needed_labels=True)
+ self._check_hostnames(hosts, ['host2'])
+
+
def test_get_jobs_summary(self):
job = self._create_job(hosts=xrange(1, 4))
entries = list(job.hostqueueentry_set.all())
@@ -52,9 +92,13 @@
'Failed': 2})
+ def _create_job_helper(self, **kwargs):
+ return rpc_interface.create_job('test', 'Medium', 'control file',
+ 'Server', **kwargs)
+
+
def test_one_time_hosts(self):
- job = rpc_interface.create_job('test', 'Medium', 'control file',
- 'Server', one_time_hosts=['testhost'])
+ job = self._create_job_helper(one_time_hosts=['testhost'])
host = models.Host.objects.get(hostname='testhost')
self.assertEquals(host.invalid, True)
self.assertEquals(host.labels.count(), 0)
@@ -141,5 +185,10 @@
self.assertEquals(entry2['started_on'], '2009-01-03 00:00:00')
+ def _create_job_helper(self, **kwargs):
+ return rpc_interface.create_job('test', 'Medium', 'control file',
+ 'Server', **kwargs)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index c4003b1..548556b 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -7,7 +7,7 @@
import datetime, os
import django.http
-from frontend.afe import models, model_logic
+from autotest_lib.frontend.afe import models, model_logic
NULL_DATETIME = datetime.datetime.max
NULL_DATE = datetime.date.max
@@ -201,8 +201,7 @@
hosts_in_job = models.Host.objects.filter(id__in=host_ids)
ok_hosts = hosts_in_job
for index, dependency in enumerate(job_dependencies):
- ok_hosts &= models.Host.objects.filter_custom_join(
- '_label%d' % index, labels__name=dependency)
+ ok_hosts = ok_hosts.filter(labels__name=dependency)
failing_hosts = (set(host.hostname for host in host_objects) -
set(host.hostname for host in ok_hosts))
if failing_hosts:
diff --git a/frontend/afe/test.py b/frontend/afe/test.py
index 9b8f57d..431272e 100644
--- a/frontend/afe/test.py
+++ b/frontend/afe/test.py
@@ -1,7 +1,8 @@
import common
import os, doctest, glob, sys
-import django.test.utils, django.test.simple
from django.conf import settings
+from django.db import connection
+import django.test.utils
# doctest takes a copy+paste log of a Python interactive session, runs a Python
# interpreter, and replays all the inputs from the log, checking that the
@@ -56,7 +57,7 @@
total_errors = 0
old_db = settings.DATABASE_NAME
django.test.utils.setup_test_environment()
- django.test.utils.create_test_db()
+ connection.creation.create_test_db()
try:
for module in modules:
failures, test_count = doctest.testmod(module)
@@ -68,7 +69,7 @@
print self._PRINT_AFTER % (test_count, path)
total_errors += failures
finally:
- django.test.utils.destroy_test_db(old_db)
+ connection.creation.destroy_test_db(old_db)
django.test.utils.teardown_test_environment()
print
if total_errors == 0:
diff --git a/frontend/afe/urls.py b/frontend/afe/urls.py
index 09bff6c..78cd5fb 100644
--- a/frontend/afe/urls.py
+++ b/frontend/afe/urls.py
@@ -1,15 +1,16 @@
from django.conf.urls.defaults import *
import os
-from frontend import settings
-from frontend.afe.feeds import feed
+from autotest_lib.frontend import settings
+from autotest_lib.frontend.afe.feeds import feed
feeds = {
'jobs' : feed.JobFeed
}
-pattern_list = [(r'^(?:|noauth/)rpc/', 'frontend.afe.views.handle_rpc'),
- (r'^rpc_doc', 'frontend.afe.views.rpc_documentation'),
- ]
+pattern_list = [
+ (r'^(?:|noauth/)rpc/', 'frontend.afe.views.handle_rpc'),
+ (r'^rpc_doc', 'frontend.afe.views.rpc_documentation'),
+ ]
debug_pattern_list = [
(r'^model_doc/', 'frontend.afe.views.model_documentation'),
diff --git a/frontend/afe/views.py b/frontend/afe/views.py
index fa05517..499905d 100644
--- a/frontend/afe/views.py
+++ b/frontend/afe/views.py
@@ -1,7 +1,7 @@
import urllib2, sys, traceback, cgi
-from frontend.afe import models, rpc_handler, rpc_interface, site_rpc_interface
-from frontend.afe import rpc_utils
+from autotest_lib.frontend.afe import models, rpc_handler, rpc_interface
+from autotest_lib.frontend.afe import site_rpc_interface, rpc_utils
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.http import HttpResponseServerError
from django.template import Context, loader
diff --git a/frontend/apache_auth.py b/frontend/apache_auth.py
index 481e367..4ed1471 100644
--- a/frontend/apache_auth.py
+++ b/frontend/apache_auth.py
@@ -1,13 +1,14 @@
from django.contrib.auth.models import User, Group, check_password
+from django.contrib.auth import backends
from django.contrib import auth
from django import http
-from frontend import thread_local
-from frontend.afe import models, management
+from autotest_lib.frontend import thread_local
+from autotest_lib.frontend.afe import models, management
DEBUG_USER = 'debug_user'
-class SimpleAuthBackend:
+class SimpleAuthBackend(backends.ModelBackend):
"""
Automatically allows any login. This backend is for use when Apache is
doing the real authentication. Also ensures logged-in user exists in
diff --git a/frontend/client/src/autotest/afe/HostDetailView.java b/frontend/client/src/autotest/afe/HostDetailView.java
index c3e3650..d75a693 100644
--- a/frontend/client/src/autotest/afe/HostDetailView.java
+++ b/frontend/client/src/autotest/afe/HostDetailView.java
@@ -32,7 +32,7 @@
public class HostDetailView extends DetailView
implements DataCallback, TableActionsListener, SelectableRowFilter {
private static final String[][] HOST_JOBS_COLUMNS = {
- {DataTable.WIDGET_COLUMN, ""}, {"type", "Type"}, {"job_id", "Job ID"},
+ {DataTable.WIDGET_COLUMN, ""}, {"type", "Type"}, {"job__id", "Job ID"},
{"job_owner", "Job Owner"}, {"job_name", "Job Name"}, {"started_on", "Time started"},
{"status", "Status"}
};
@@ -66,7 +66,7 @@
String key;
if (getDataSource() == normalDataSource) {
key = "host__hostname";
- sortOnColumn("job_id", SortDirection.DESCENDING);
+ sortOnColumn("job__id", SortDirection.DESCENDING);
} else {
key = "hostname";
clearSorts();
@@ -98,7 +98,7 @@
name = job.get("name").isString();
}
- row.put("job_id", jobId);
+ row.put("job__id", jobId);
row.put("job_owner", owner);
row.put("job_name", name);
diff --git a/frontend/client/src/autotest/afe/HostTableDecorator.java b/frontend/client/src/autotest/afe/HostTableDecorator.java
index 0d63020..e25a741 100644
--- a/frontend/client/src/autotest/afe/HostTableDecorator.java
+++ b/frontend/client/src/autotest/afe/HostTableDecorator.java
@@ -48,7 +48,7 @@
public HostTableDecorator(HostTable table, int rowsPerPage) {
super(table);
- table.sortOnColumn("Hostname");
+ table.sortOnColumn("hostname"); /* Case sensitive name */
table.setRowsPerPage(rowsPerPage);
addPaginators();
diff --git a/frontend/manage.py b/frontend/manage.py
index 5e78ea9..faf9daf 100755
--- a/frontend/manage.py
+++ b/frontend/manage.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+import common
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
diff --git a/frontend/settings.py b/frontend/settings.py
index e3787d1..7001afc 100644
--- a/frontend/settings.py
+++ b/frontend/settings.py
@@ -15,7 +15,7 @@
MANAGERS = ADMINS
-DATABASE_ENGINE = 'mysql_old' # 'postgresql_psycopg2', 'postgresql',
+DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql',
# 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_PORT = '' # Set to empty string for default.
# Not used with sqlite3.
diff --git a/frontend/setup_test_environment.py b/frontend/setup_test_environment.py
index 77b5962..1c9dad3 100644
--- a/frontend/setup_test_environment.py
+++ b/frontend/setup_test_environment.py
@@ -36,7 +36,7 @@
def run_syncdb(verbosity=0):
- management.syncdb(verbosity, interactive=False)
+ management.call_command('syncdb', verbosity=verbosity, interactive=False)
def destroy_test_database():
diff --git a/frontend/templates/admin/afe/test/change_list.html b/frontend/templates/admin/afe/test/change_list.html
index 2409e89..7af4c5d 100644
--- a/frontend/templates/admin/afe/test/change_list.html
+++ b/frontend/templates/admin/afe/test/change_list.html
@@ -1,26 +1,43 @@
{% extends "admin/base_site.html" %}
{% load adminmedia admin_list i18n %}
+
{% block stylesheet %}{% admin_media_prefix %}css/changelists.css{% endblock %}
+
+<!-- This file is a copy of django/contrib/admin/templates/admin/change_list.html with the following extrastyle added: -->
{% block extrastyle %}
<style type="text/css">
table tbody td { white-space:pre; }
</style>
{% endblock %}
+
{% block bodyclass %}change-list{% endblock %}
-{% block userlinks %}<a href="../../doc/">{% trans 'Documentation' %}</a> / <a href="../../password_change/">{% trans 'Change password' %}</a> / <a href="../../logout/">{% trans 'Log out' %}</a>{% endblock %}
-{% if not is_popup %}{% block breadcrumbs %}<div class="breadcrumbs"><a href="../../">{% trans "Home" %}</a> › {{ cl.opts.verbose_name_plural|capfirst|escape }}</div>{% endblock %}{% endif %}
+
+{% if not is_popup %}{% block breadcrumbs %}<div class="breadcrumbs"><a href="../../">{% trans "Home" %}</a> › <a href="../">{{ app_label|capfirst }}</a> › {{ cl.opts.verbose_name_plural|capfirst }}</div>{% endblock %}{% endif %}
+
{% block coltype %}flex{% endblock %}
+
{% block content %}
<div id="content-main">
{% block object-tools %}
{% if has_add_permission %}
-<ul class="object-tools"><li><a href="add/{% if is_popup %}?_popup=1{% endif %}" class="addlink">{% blocktrans with cl.opts.verbose_name|escape as name %}Add {{ name }}{% endblocktrans %}</a></li></ul>
+<ul class="object-tools"><li><a href="add/{% if is_popup %}?_popup=1{% endif %}" class="addlink">{% blocktrans with cl.opts.verbose_name as name %}Add {{ name }}{% endblocktrans %}</a></li></ul>
{% endif %}
{% endblock %}
<div class="module{% if cl.has_filters %} filtered{% endif %}" id="changelist">
{% block search %}{% search_form cl %}{% endblock %}
{% block date_hierarchy %}{% date_hierarchy cl %}{% endblock %}
-{% block filters %}{% filters cl %}{% endblock %}
+
+{% block filters %}
+{% if cl.has_filters %}
+<div id="changelist-filter">
+<h2>{% trans 'Filter' %}</h2>
+{% for spec in cl.filter_specs %}
+ {% admin_list_filter cl spec %}
+{% endfor %}
+</div>
+{% endif %}
+{% endblock %}
+
{% block result_list %}{% result_list cl %}{% endblock %}
{% block pagination %}{% pagination cl %}{% endblock %}
</div>
diff --git a/frontend/urls.py b/frontend/urls.py
index 15554d2..3aeb5d7 100644
--- a/frontend/urls.py
+++ b/frontend/urls.py
@@ -1,21 +1,25 @@
from django.conf.urls.defaults import *
from django.conf import settings
+# The next two lines enable the admin and load each admin.py file:
+from django.contrib import admin
+admin.autodiscover()
+
RE_PREFIX = '^' + settings.URL_PREFIX
handler500 = 'frontend.afe.views.handler500'
pattern_list = (
- (RE_PREFIX + r'admin/', include('django.contrib.admin.urls')),
- (RE_PREFIX, include('frontend.afe.urls')),
-)
+ (RE_PREFIX + r'admin/(.*)', admin.site.root),
+ (RE_PREFIX, include('frontend.afe.urls')),
+ )
debug_pattern_list = (
- # redirect /tko and /results to local apache server
- (r'^(?P<path>(tko|results)/.*)$',
- 'frontend.afe.views.redirect_with_extra_data',
- {'url': 'http://%(server_name)s/%(path)s?%(getdata)s'}),
-)
+ # redirect /tko and /results to local apache server
+ (r'^(?P<path>(tko|results)/.*)$',
+ 'frontend.afe.views.redirect_with_extra_data',
+ {'url': 'http://%(server_name)s/%(path)s?%(getdata)s'}),
+ )
if settings.DEBUG:
pattern_list += debug_pattern_list