This patch extends the autotest system with recurring job execution. When
creating a new recurring job, you can specify:
- start time (on the server)
- loop count (0 means infinite): how many times the job will be executed
- loop period: how many seconds to wait between two executions
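
For illustration, these values map directly onto the new create_recurring_run
RPC. Below is a minimal sketch of calling it through the RPC interface module;
the job id and date are made up, and the thread-local user setup that the real
RPC server performs is ignored here:

    from frontend.afe import rpc_interface

    # Schedule template job 42 (hypothetical id) to run five times, once an hour.
    recurring_id = rpc_interface.create_recurring_run(
            job_id=42,                      # id of an existing template job
            start_date='2009-05-01 12:00',  # server-local, "%Y-%m-%d %H:%M"
            loop_period=3600,               # seconds between two executions
            loop_count=5)                   # 0 would mean repeat forever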

Added features:
- Create job: can create a template job (see the sketch after this list).
- View job: added a "Create recurring job" button.
- New tab "Recurring job":
  - list of recurring jobs
    - click an entry to view the executed job
    - selection support
    - Actions -> remove
  - creation panel (reachable through "Create recurring job")
    - submit/create
    - reset
    - data validity check
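
The template job that a recurring run points at is an ordinary job created
with the new is_template flag; its host queue entries get the new Template
status instead of Queued, so the scheduler treats them as complete rather
than runnable. A rough sketch of the corresponding create_job call (the job
name, control file variable, and hostname are placeholders):

    from frontend.afe import rpc_interface

    # Create a template job; a recurring run can later instantiate it.
    template_job_id = rpc_interface.create_job(
            name='nightly-sleeptest',            # placeholder job name
            priority='Medium',
            control_file=control_file_contents,  # assumed to hold the control file text
            control_type='Client',
            is_template=True,                    # queue entries get Status.TEMPLATE
            hosts=['host1.example.com'])         # placeholder hostname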

From: Zoltan Sogor <weth@inf.u-szeged.hu>
Signed-off-by: Steve Howard <showard@google.com>



git-svn-id: http://test.kernel.org/svn/autotest/trunk@3064 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/afe/models.py b/frontend/afe/models.py
index c53eb42..836a773 100644
--- a/frontend/afe/models.py
+++ b/frontend/afe/models.py
@@ -47,10 +47,10 @@
     valid_objects = model_logic.ValidObjectsManager()
 
 
-    def enqueue_job(self, job):
+    def enqueue_job(self, job, is_template=False):
         """Enqueue a job on an associated atomic group of hosts."""
-        queue_entry = HostQueueEntry(atomic_group=self, job=job,
-                                     status=HostQueueEntry.Status.QUEUED)
+        queue_entry = HostQueueEntry.create(atomic_group=self, job=job,
+                                            is_template=is_template)
         queue_entry.save()
 
 
@@ -100,11 +100,11 @@
         self.host_set.clear()
 
 
-    def enqueue_job(self, job, atomic_group=None):
+    def enqueue_job(self, job, atomic_group=None, is_template=False):
         """Enqueue a job on any host of this label."""
-        queue_entry = HostQueueEntry(meta_host=self, job=job,
-                                     status=HostQueueEntry.Status.QUEUED,
-                                     atomic_group=atomic_group)
+        queue_entry = HostQueueEntry.create(meta_host=self, job=job,
+                                            is_template=is_template,
+                                            atomic_group=atomic_group)
         queue_entry.save()
 
 
@@ -285,11 +285,11 @@
         logger.info(self.hostname + ' -> ' + self.status)
 
 
-    def enqueue_job(self, job, atomic_group=None):
+    def enqueue_job(self, job, atomic_group=None, is_template=False):
         """Enqueue a job on this host."""
-        queue_entry = HostQueueEntry(host=self, job=job,
-                                     status=HostQueueEntry.Status.QUEUED,
-                                     atomic_group=atomic_group)
+        queue_entry = HostQueueEntry.create(host=self, job=job,
+                                            is_template=is_template,
+                                            atomic_group=atomic_group)
         # allow recovery of dead hosts from the frontend
         if not self.active_queue_entry() and self.is_dead():
             self.status = Host.Status.READY
@@ -697,14 +697,24 @@
         return job
 
 
-    def queue(self, hosts, atomic_group=None):
+    def queue(self, hosts, atomic_group=None, is_template=False):
         """Enqueue a job on the given hosts."""
         if atomic_group and not hosts:
             # No hosts or labels are required to queue an atomic group
             # Job.  However, if they are given, we respect them below.
-            atomic_group.enqueue_job(self)
+            atomic_group.enqueue_job(self, is_template=is_template)
         for host in hosts:
-            host.enqueue_job(self, atomic_group=atomic_group)
+            host.enqueue_job(self, atomic_group=atomic_group,
+                             is_template=is_template)
+
+
+    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
+        rec = RecurringRun(job=self, start_date=start_date,
+                           loop_period=loop_period,
+                           loop_count=loop_count,
+                           owner=User.objects.get(login=owner))
+        rec.save()
+        return rec.id
 
 
     def user(self):
@@ -747,11 +757,11 @@
 class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
     Status = enum.Enum('Queued', 'Starting', 'Verifying', 'Pending', 'Running',
                        'Gathering', 'Parsing', 'Aborted', 'Completed',
-                       'Failed', 'Stopped', string_values=True)
+                       'Failed', 'Stopped', 'Template', string_values=True)
     ACTIVE_STATUSES = (Status.STARTING, Status.VERIFYING, Status.PENDING,
                        Status.RUNNING, Status.GATHERING)
     COMPLETE_STATUSES = (Status.ABORTED, Status.COMPLETED, Status.FAILED,
-                         Status.STOPPED)
+                         Status.STOPPED, Status.TEMPLATE)
 
     job = dbmodels.ForeignKey(Job)
     host = dbmodels.ForeignKey(Host, blank=True, null=True)
@@ -775,6 +785,18 @@
         self._record_attributes(['status'])
 
 
+    @classmethod
+    def create(cls, job, host=None, meta_host=None, atomic_group=None,
+                 is_template=False):
+        if is_template:
+            status = cls.Status.TEMPLATE
+        else:
+            status = cls.Status.QUEUED
+
+        return cls(job=job, host=host, meta_host=meta_host,
+                   atomic_group=atomic_group, status=status)
+
+
     def save(self):
         self._set_active_and_complete()
         super(HostQueueEntry, self).save()
@@ -865,3 +887,29 @@
 
     class Meta:
         db_table = 'aborted_host_queue_entries'
+
+
+class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
+    """\
+    job: job to use as a template
+    owner: owner of the instantiated template
+    start_date: Run the job at scheduled date
+    loop_period: Re-run (loop) the job periodically
+                 (in every loop_period seconds)
+    loop_count: Re-run (loop) count
+    """
+
+    job = dbmodels.ForeignKey(Job)
+    owner = dbmodels.ForeignKey(User)
+    start_date = dbmodels.DateTimeField()
+    loop_period = dbmodels.IntegerField(blank=True)
+    loop_count = dbmodels.IntegerField(blank=True)
+
+    objects = model_logic.ExtendedManager()
+
+    class Meta:
+        db_table = 'recurring_run'
+
+    def __str__(self):
+        return 'RecurringRun(job %s, start %s, period %s, count %s)' % (
+            self.job.id, self.start_date, self.loop_period, self.loop_count)
diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
index a4d3fba..394605f 100644
--- a/frontend/afe/rpc_interface.py
+++ b/frontend/afe/rpc_interface.py
@@ -29,6 +29,7 @@
 
 __author__ = 'showard@google.com (Steve Howard)'
 
+import datetime
 from frontend import thread_local
 from frontend.afe import models, model_logic, control_file, rpc_utils
 from autotest_lib.client.common_lib import global_config
@@ -325,8 +326,8 @@
     return cf_info
 
 
-def create_job(name, priority, control_file, control_type, timeout=None,
-               synch_count=None, hosts=(), meta_hosts=(),
+def create_job(name, priority, control_file, control_type, is_template=False,
+               timeout=None, synch_count=None, hosts=(), meta_hosts=(),
                run_verify=True, one_time_hosts=(), email_list='',
                dependencies=(), reboot_before=None, reboot_after=None,
                atomic_group_name=None):
@@ -336,6 +337,7 @@
     priority: Low, Medium, High, Urgent
     control_file: String contents of the control file.
     control_type: Type of control file, Client or Server.
+    is_template: If true then create a template job.
     timeout: Hours after this call returns until the job times out.
     synch_count: How many machines the job uses per autoserv execution.
                  synch_count == 1 means the job is asynchronous.  If an
@@ -390,81 +392,32 @@
     # convert hostnames & meta hosts to host/label objects
     host_objects = models.Host.smart_get_bulk(hosts)
     metahost_objects = []
-    metahost_counts = {}
     for label in meta_hosts or []:
         if label not in labels_by_name:
             raise model_logic.ValidationError(
                 {'meta_hosts' : 'Label "%s" not found' % label})
         this_label = labels_by_name[label]
         metahost_objects.append(this_label)
-        metahost_counts.setdefault(this_label, 0)
-        metahost_counts[this_label] += 1
     for host in one_time_hosts or []:
         this_host = models.Host.create_one_time_host(host)
         host_objects.append(this_host)
 
-    all_host_objects = host_objects + metahost_objects
-
-    # check that each metahost request has enough hosts under the label
-    for label, requested_count in metahost_counts.iteritems():
-        available_count = label.host_set.count()
-        if requested_count > available_count:
-            error = ("You have requested %d %s's, but there are only %d."
-                     % (requested_count, label.name, available_count))
-            raise model_logic.ValidationError({'meta_hosts' : error})
-
-    if atomic_group:
-        rpc_utils.check_atomic_group_create_job(
-                synch_count, host_objects, metahost_objects,
-                dependencies, atomic_group, labels_by_name)
-    else:
-        if synch_count is not None and synch_count > len(all_host_objects):
-            raise model_logic.ValidationError(
-                    {'hosts':
-                     'only %d hosts provided for job with synch_count = %d' %
-                     (len(all_host_objects), synch_count)})
-        atomic_hosts = models.Host.objects.filter(
-                id__in=[host.id for host in host_objects],
-                labels__atomic_group=True)
-        unusable_host_names = [host.hostname for host in atomic_hosts]
-        if unusable_host_names:
-            raise model_logic.ValidationError(
-                    {'hosts':
-                     'Host(s) "%s" are atomic group hosts but no '
-                     'atomic group was specified for this job.' %
-                     (', '.join(unusable_host_names),)})
-
-
-    rpc_utils.check_job_dependencies(host_objects, dependencies)
-    dependency_labels = [labels_by_name[label_name]
-                         for label_name in dependencies]
-
-    for label in metahost_objects + dependency_labels:
-        if label.atomic_group and not atomic_group:
-            raise model_logic.ValidationError(
-                    {'atomic_group_name':
-                     'Some meta_hosts or dependencies require an atomic group '
-                     'but no atomic_group_name was specified for this job.'})
-        elif (label.atomic_group and
-              label.atomic_group.name != atomic_group_name):
-            raise model_logic.ValidationError(
-                    {'atomic_group_name':
-                     'Some meta_hosts or dependencies require an atomic group '
-                     'other than the one requested for this job.'})
-
-    job = models.Job.create(owner=owner, name=name, priority=priority,
-                            control_file=control_file,
-                            control_type=control_type,
-                            synch_count=synch_count,
-                            hosts=all_host_objects,
-                            timeout=timeout,
-                            run_verify=run_verify,
-                            email_list=email_list.strip(),
-                            dependencies=dependency_labels,
-                            reboot_before=reboot_before,
-                            reboot_after=reboot_after)
-    job.queue(all_host_objects, atomic_group=atomic_group)
-    return job.id
+    return rpc_utils.create_new_job(owner=owner,
+                                    host_objects=host_objects,
+                                    metahost_objects=metahost_objects,
+                                    name=name,
+                                    priority=priority,
+                                    control_file=control_file,
+                                    control_type=control_type,
+                                    is_template=is_template,
+                                    synch_count=synch_count,
+                                    timeout=timeout,
+                                    run_verify=run_verify,
+                                    email_list=email_list,
+                                    dependencies=dependencies,
+                                    reboot_before=reboot_before,
+                                    reboot_after=reboot_after,
+                                    atomic_group=atomic_group)
 
 
 def abort_host_queue_entries(**filter_data):
@@ -529,66 +482,39 @@
     """\
     Retrieves all the information needed to clone a job.
     """
-    info = {}
+
     job = models.Job.objects.get(id=id)
-    query = job.hostqueueentry_set.filter()
-
-    hosts = []
-    meta_hosts = []
-    atomic_group_name = None
-
-    # For each queue entry, if the entry contains a host, add the entry into the
-    # hosts list if either:
-    #     It is not a metahost.
-    #     It was an assigned metahost, and the user wants to keep the specific
-    #         assignments.
-    # Otherwise, add the metahost to the metahosts list.
-    for queue_entry in query:
-        if (queue_entry.host and (preserve_metahosts
-                                  or not queue_entry.meta_host)):
-            if queue_entry.deleted:
-                continue
-            hosts.append(queue_entry.host)
-        else:
-            meta_hosts.append(queue_entry.meta_host.name)
-        if atomic_group_name is None:
-            if queue_entry.atomic_group is not None:
-                atomic_group_name = queue_entry.atomic_group.name
-        else:
-            assert atomic_group_name == queue_entry.atomic_group.name, (
-                    'DB inconsistency.  HostQueueEntries with multiple atomic'
-                    ' groups on job %s: %s != %s' % (
-                        id, atomic_group_name, queue_entry.atomic_group.name))
+    job_info = rpc_utils.get_job_info(job,
+                                      preserve_metahosts=preserve_metahosts)
 
     host_dicts = []
-
-    for host in hosts:
-        # one-time host
-        if host.invalid:
-            host_dict = {}
-            host_dict['hostname'] = host.hostname
-            host_dict['id'] = host.id
-            host_dict['platform'] = '(one-time host)'
-            host_dict['locked_text'] = ''
-        else:
-            host_dict = get_hosts(id=host.id)[0]
-            other_labels = host_dict['labels']
-            if host_dict['platform']:
-                other_labels.remove(host_dict['platform'])
-            host_dict['other_labels'] = ', '.join(other_labels)
+    for host in job_info['hosts']:
+        host_dict = get_hosts(id=host.id)[0]
+        other_labels = host_dict['labels']
+        if host_dict['platform']:
+            other_labels.remove(host_dict['platform'])
+        host_dict['other_labels'] = ', '.join(other_labels)
         host_dicts.append(host_dict)
 
-    meta_host_counts = {}
-    for meta_host in meta_hosts:
-        meta_host_counts.setdefault(meta_host, 0)
-        meta_host_counts[meta_host] += 1
+    for host in job_info['one_time_hosts']:
+        host_dict = dict(hostname=host.hostname,
+                         id=host.id,
+                         platform='(one-time host)',
+                         locked_text='')
+        host_dicts.append(host_dict)
 
-    info['job'] = job.get_object_dict()
-    info['job']['dependencies'] = [label.name for label
-                                   in job.dependency_labels.all()]
-    info['meta_host_counts'] = meta_host_counts
-    info['hosts'] = host_dicts
-    info['atomic_group_name'] = atomic_group_name
+    # convert keys from Label objects to strings (names of labels)
+    meta_host_counts = dict((meta_host.name, count) for meta_host, count
+                            in job_info['meta_host_counts'].iteritems())
+
+    info = dict(job=job.get_object_dict(),
+                meta_host_counts=meta_host_counts,
+                hosts=host_dicts)
+    info['job']['dependencies'] = job_info['dependencies']
+    if job_info['atomic_group']:
+        info['atomic_group_name'] = (job_info['atomic_group']).name
+    else:
+        info['atomic_group_name'] = None
 
     return rpc_utils.prepare_for_serialization(info)
 
@@ -624,6 +550,32 @@
     return float(complete_count) / total_count
 
 
+# recurring run
+
+def get_recurring(**filter_data):
+    return rpc_utils.prepare_rows_as_nested_dicts(
+            models.RecurringRun.query_objects(filter_data),
+            ('job', 'owner'))
+
+
+def get_num_recurring(**filter_data):
+    return models.RecurringRun.query_count(filter_data)
+
+
+def delete_recurring_runs(**filter_data):
+    to_delete = models.RecurringRun.query_objects(filter_data)
+    to_delete.delete()
+
+
+def create_recurring_run(job_id, start_date, loop_period, loop_count):
+    owner = thread_local.get_user().login
+    job = models.Job.objects.get(id=job_id)
+    return job.create_recurring_job(start_date=start_date,
+                                    loop_period=loop_period,
+                                    loop_count=loop_count,
+                                    owner=owner)
+
+
 # other
 
 def echo(data=""):
@@ -695,5 +647,11 @@
                                    "Starting": "Next in host's queue",
                                    "Stopped": "Other host(s) failed verify",
                                    "Parsing": "Awaiting parse of final results",
-                                   "Gathering": "Gathering log files"}
+                                   "Gathering": "Gathering log files",
+                                   "Template": "Template job for recurring run"}
+
     return result
+
+
+def get_server_time():
+    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index f520f38..0bef95e 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -313,3 +313,122 @@
         pass
 
     return text
+
+
+def _get_metahost_counts(metahost_objects):
+    metahost_counts = {}
+    for metahost in metahost_objects:
+        metahost_counts.setdefault(metahost, 0)
+        metahost_counts[metahost] += 1
+    return metahost_counts
+
+
+def get_job_info(job, preserve_metahosts=False):
+    hosts = []
+    one_time_hosts = []
+    meta_hosts = []
+    atomic_group = None
+
+    for queue_entry in job.hostqueueentry_set.filter():
+        if (queue_entry.host and (preserve_metahosts or
+                                  not queue_entry.meta_host)):
+            if queue_entry.deleted:
+                continue
+            if queue_entry.host.invalid:
+                one_time_hosts.append(queue_entry.host)
+            else:
+                hosts.append(queue_entry.host)
+        else:
+            meta_hosts.append(queue_entry.meta_host)
+        if atomic_group is None:
+            if queue_entry.atomic_group is not None:
+                atomic_group = queue_entry.atomic_group
+        else:
+            assert atomic_group.name == queue_entry.atomic_group.name, (
+                    'DB inconsistency.  HostQueueEntries with multiple atomic'
+                    ' groups on job %s: %s != %s' % (
+                        job.id, atomic_group.name, queue_entry.atomic_group.name))
+
+    meta_host_counts = _get_metahost_counts(meta_hosts)
+
+    info = dict(dependencies=[label.name for label
+                              in job.dependency_labels.all()],
+                hosts=hosts,
+                meta_hosts=meta_hosts,
+                meta_host_counts=meta_host_counts,
+                one_time_hosts=one_time_hosts,
+                atomic_group=atomic_group)
+    return info
+
+
+def create_new_job(owner, host_objects, metahost_objects,
+                   name, priority, control_file, control_type,
+                   is_template=False, timeout=None, synch_count=None,
+                   run_verify=True, email_list='', dependencies=[],
+                   reboot_before=None, reboot_after=None, atomic_group=None):
+    labels_by_name = dict((label.name, label)
+                     for label in models.Label.objects.all())
+    all_host_objects = host_objects + metahost_objects
+    metahost_counts = _get_metahost_counts(metahost_objects)
+
+    # check that each metahost request has enough hosts under the label
+    for label, requested_count in metahost_counts.iteritems():
+        available_count = label.host_set.count()
+        if requested_count > available_count:
+            error = ("You have requested %d %s's, but there are only %d."
+                     % (requested_count, label.name, available_count))
+            raise model_logic.ValidationError({'meta_hosts' : error})
+
+    if atomic_group:
+        check_atomic_group_create_job(
+                synch_count, host_objects, metahost_objects,
+                dependencies, atomic_group, labels_by_name)
+    else:
+        if synch_count is not None and synch_count > len(all_host_objects):
+            raise model_logic.ValidationError(
+                    {'hosts':
+                     'only %d hosts provided for job with synch_count = %d' %
+                     (len(all_host_objects), synch_count)})
+        atomic_hosts = models.Host.objects.filter(
+                id__in=[host.id for host in host_objects],
+                labels__atomic_group=True)
+        unusable_host_names = [host.hostname for host in atomic_hosts]
+        if unusable_host_names:
+            raise model_logic.ValidationError(
+                    {'hosts':
+                     'Host(s) "%s" are atomic group hosts but no '
+                     'atomic group was specified for this job.' %
+                     (', '.join(unusable_host_names),)})
+
+
+    check_job_dependencies(host_objects, dependencies)
+    dependency_labels = [labels_by_name[label_name]
+                         for label_name in dependencies]
+
+    for label in metahost_objects + dependency_labels:
+        if label.atomic_group and not atomic_group:
+            raise model_logic.ValidationError(
+                    {'atomic_group_name':
+                     'Some meta_hosts or dependencies require an atomic group '
+                     'but no atomic_group_name was specified for this job.'})
+        elif (label.atomic_group and
+              label.atomic_group.name != atomic_group.name):
+            raise model_logic.ValidationError(
+                    {'atomic_group_name':
+                     'Some meta_hosts or dependencies require an atomic group '
+                     'other than the one requested for this job.'})
+
+    job = models.Job.create(owner=owner, name=name, priority=priority,
+                            control_file=control_file,
+                            control_type=control_type,
+                            synch_count=synch_count,
+                            hosts=all_host_objects,
+                            timeout=timeout,
+                            run_verify=run_verify,
+                            email_list=email_list.strip(),
+                            dependencies=dependency_labels,
+                            reboot_before=reboot_before,
+                            reboot_after=reboot_after)
+    job.queue(all_host_objects, atomic_group=atomic_group,
+              is_template=is_template)
+    return job.id
diff --git a/frontend/client/src/autotest/afe/AfeClient.java b/frontend/client/src/autotest/afe/AfeClient.java
index e3f0fa3..490237f 100644
--- a/frontend/client/src/autotest/afe/AfeClient.java
+++ b/frontend/client/src/autotest/afe/AfeClient.java
@@ -5,6 +5,7 @@
 import autotest.afe.HostListView.HostListListener;
 import autotest.afe.JobDetailView.JobDetailListener;
 import autotest.afe.JobListView.JobSelectListener;
+import autotest.afe.RecurringView.RecurringSelectListener;
 import autotest.afe.UserPreferencesView.UserPreferencesListener;
 import autotest.common.CustomHistory;
 import autotest.common.JsonRpcProxy;
@@ -22,6 +23,7 @@
 public class AfeClient implements EntryPoint {
     private JobListView jobList;
     private JobDetailView jobDetail;
+    private RecurringView recurringView;
     private CreateJobView createJob;
     private HostListView hostListView;
     private HostDetailView hostDetailView;
@@ -63,7 +65,20 @@
                 createJob.cloneJob(cloneInfo);
                 mainTabPanel.selectTabView(createJob);
             }
+
+            public void onCreateRecurringJob(int jobId) {
+                recurringView.ensureInitialized();
+                recurringView.createRecurringJob(jobId);
+                mainTabPanel.selectTabView(recurringView);
+            }
         });
+
+        recurringView = new RecurringView(new RecurringSelectListener() {
+            public void onRecurringSelected(int jobId) {
+                showJob(jobId);
+            }
+        });
+
         createJob = AfeUtils.factory.getCreateJobView(new JobCreateListener() {
             public void onJobCreated(int jobId) {
                 showJob(jobId);
@@ -85,7 +100,7 @@
             }
         });
         
-        TabView[] tabViews = new TabView[] {jobList, jobDetail, createJob, 
+        TabView[] tabViews = new TabView[] {jobList, jobDetail, recurringView, createJob, 
                                             hostListView, hostDetailView, userPreferencesView};
         for(int i = 0; i < tabViews.length; i++) {
             mainTabPanel.addTabView(tabViews[i]);
diff --git a/frontend/client/src/autotest/afe/AfeUtils.java b/frontend/client/src/autotest/afe/AfeUtils.java
index 715b2b4..79d4fd3 100644
--- a/frontend/client/src/autotest/afe/AfeUtils.java
+++ b/frontend/client/src/autotest/afe/AfeUtils.java
@@ -7,6 +7,7 @@
 import autotest.common.StaticDataRepository;
 import autotest.common.Utils;
 import autotest.common.table.JSONObjectSet;
+import autotest.common.table.ListFilter;
 import autotest.common.ui.NotifyManager;
 import autotest.common.ui.RadioChooser;
 
@@ -26,6 +27,7 @@
  */
 public class AfeUtils {
     public static final String PLATFORM_SUFFIX = " (platform)";
+    private static final String ALL_USERS = "All Users";
     
     public static final ClassFactory factory = new SiteClassFactory();
 
@@ -172,4 +174,46 @@
             chooser.addChoice(Utils.jsonToString(jsonOption));
         }
     }
+
+    public static int parsePositiveIntegerInput(String input, String fieldName) {
+        final int parsedInt;
+        try {
+            if (input.equals("") ||
+                (parsedInt = Integer.parseInt(input)) <= 0) {
+                    String error = "Please enter a positive " + fieldName;
+                    NotifyManager.getInstance().showError(error);
+                    throw new IllegalArgumentException();
+            }
+        } catch (NumberFormatException e) {
+            String error = "Invalid " + fieldName + ": \"" + input + "\"";
+            NotifyManager.getInstance().showError(error);
+            throw new IllegalArgumentException();
+        }
+        return parsedInt;
+    }
+
+    public static void removeSecondsFromDateField(JSONObject row,
+                                                  String sourceFieldName,
+                                                  String targetFieldName) {
+        JSONValue dateValue = row.get(sourceFieldName);
+        String date = "";
+        if (dateValue.isNull() == null) {
+            date = dateValue.isString().stringValue();
+            date = date.substring(0, date.length() - 3);
+        }
+        row.put(targetFieldName, new JSONString(date));
+    }
+
+    public static ListFilter getUserFilter(String field) {
+        ListFilter userFilter = new ListFilter(field);
+        userFilter.setMatchAllText(ALL_USERS);
+
+        JSONArray userArray = staticData.getData("users").isArray();
+        String[] userStrings = Utils.JSONObjectsToStrings(userArray, "login");
+        userFilter.setChoices(userStrings);
+        String currentUser = staticData.getCurrentUserLogin();
+        userFilter.setSelectedChoice(currentUser);
+
+        return userFilter;
+    }
 }
diff --git a/frontend/client/src/autotest/afe/CreateJobView.java b/frontend/client/src/autotest/afe/CreateJobView.java
index cb68457..4f0296d 100644
--- a/frontend/client/src/autotest/afe/CreateJobView.java
+++ b/frontend/client/src/autotest/afe/CreateJobView.java
@@ -172,6 +172,7 @@
     protected Button editControlButton = new Button(EDIT_CONTROL_STRING);
     protected HostSelector hostSelector;
     protected Button submitJobButton = new Button("Submit Job");
+    protected Button createTemplateJobButton = new Button("Create Template Job");
     private Button resetButton = new Button("Reset");
     
     protected boolean controlEdited = false;
@@ -501,7 +502,13 @@
         
         submitJobButton.addClickListener(new ClickListener() {
             public void onClick(Widget sender) {
-                submitJob();
+                submitJob(false);
+            }
+        });
+        
+        createTemplateJobButton.addClickListener(new ClickListener() {
+            public void onClick(Widget sender) {
+                submitJob(true);
             }
         });
         
@@ -525,6 +532,7 @@
         RootPanel.get("create_profilers").add(profilersPanel);
         RootPanel.get("create_edit_control").add(controlFilePanel);
         RootPanel.get("create_submit").add(submitJobButton);
+        RootPanel.get("create_template_job").add(createTemplateJobButton);
         RootPanel.get("create_reset").add(resetButton);
         
         testSelector.setListener(this);
@@ -556,7 +564,7 @@
         dependencies = new JSONArray();
     }
     
-    protected void submitJob() {
+    protected void submitJob(final boolean isTemplate) {
         final int timeoutValue, synchCount;
         try {
             timeoutValue = parsePositiveIntegerInput(timeout.getText(), "timeout");
@@ -583,6 +591,7 @@
                 args.put("timeout", new JSONNumber(timeoutValue));
                 args.put("email_list", new JSONString(emailList.getText()));
                 args.put("run_verify", JSONBoolean.getInstance(!skipVerify.isChecked()));
+                args.put("is_template", JSONBoolean.getInstance(isTemplate));
                 args.put("reboot_before", new JSONString(rebootBefore.getSelectedChoice()));
                 args.put("reboot_after", new JSONString(rebootAfter.getSelectedChoice()));
                 HostSelector.HostSelection hosts = hostSelector.getSelectedHosts();
diff --git a/frontend/client/src/autotest/afe/JobDetailView.java b/frontend/client/src/autotest/afe/JobDetailView.java
index 07c5c83..bf4c2be 100644
--- a/frontend/client/src/autotest/afe/JobDetailView.java
+++ b/frontend/client/src/autotest/afe/JobDetailView.java
@@ -55,6 +55,7 @@
     public interface JobDetailListener {
         public void onHostSelected(String hostname);
         public void onCloneJob(JSONValue result);
+        public void onCreateRecurringJob(int id);
     }
     
     protected int jobId = NO_JOB_ID;
@@ -65,6 +66,7 @@
     protected SimpleFilter jobFilter = new SimpleFilter();
     protected Button abortButton = new Button("Abort job");
     protected Button cloneButton = new Button("Clone job");
+    protected Button recurringButton = new Button("Create recurring job");
     protected HTML tkoResultsHtml = new HTML();
     protected ScrollPanel tkoResultsScroller = new ScrollPanel(tkoResultsHtml);
     protected JobDetailListener listener;
@@ -185,6 +187,13 @@
         });
         RootPanel.get("view_clone").add(cloneButton);
         
+        recurringButton.addClickListener(new ClickListener() {
+            public void onClick(Widget sender) {
+                createRecurringJob();
+            }
+        });
+        RootPanel.get("view_recurring").add(recurringButton);
+
         tkoResultsScroller.setStyleName("results-frame");
         RootPanel.get("tko_results").add(tkoResultsScroller);
     }
@@ -248,6 +257,10 @@
             }
         });
     }
+
+    private void createRecurringJob() {
+        listener.onCreateRecurringJob(jobId);
+    }
     
     private String getResultsURL(int jobId) {
         return "/new_tko/#tab_id=spreadsheet_view&row=hostname&column=test_name&" +
diff --git a/frontend/client/src/autotest/afe/JobListView.java b/frontend/client/src/autotest/afe/JobListView.java
index 7741afa..d7d31ba 100644
--- a/frontend/client/src/autotest/afe/JobListView.java
+++ b/frontend/client/src/autotest/afe/JobListView.java
@@ -1,8 +1,6 @@
 package autotest.afe;
 
 import autotest.common.SimpleCallback;
-import autotest.common.StaticDataRepository;
-import autotest.common.Utils;
 import autotest.common.CustomHistory.HistoryToken;
 import autotest.common.table.LinkSetFilter;
 import autotest.common.table.ListFilter;
@@ -27,7 +25,6 @@
 
 
 public class JobListView extends TabView implements TableActionsListener {
-    protected static final String ALL_USERS = "All Users";
     protected static final String SELECTED_LINK_STYLE = "selected-link";
     protected static final int JOBS_PER_PAGE = 30;
     protected static final int QUEUED = 0, RUNNING = 1, FINISHED = 2, 
@@ -98,15 +95,6 @@
         jobTable.refresh();
     }
 
-    protected void populateUsers() {
-        StaticDataRepository staticData = StaticDataRepository.getRepository();
-        JSONArray userArray = staticData.getData("users").isArray();
-        String[] userStrings = Utils.JSONObjectsToStrings(userArray, "login");
-        ownerFilter.setChoices(userStrings);
-        String currentUser = staticData.getCurrentUserLogin();
-        ownerFilter.setSelectedChoice(currentUser);
-    }
-
     public JobListView(JobSelectListener listener) {
         selectListener = listener;
     }
@@ -133,10 +121,8 @@
         tableDecorator.addTableActionsPanel(this, true);
         RootPanel.get("job_table").add(tableDecorator);
         
-        ownerFilter = new ListFilter("owner");
-        ownerFilter.setMatchAllText("All users");
+        ownerFilter = AfeUtils.getUserFilter("owner");
         jobTable.addFilter(ownerFilter);
-        populateUsers();
         RootPanel.get("user_list").add(ownerFilter.getWidget());
         
         nameFilter = new SearchFilter("name", false);
diff --git a/frontend/client/src/autotest/afe/JobTable.java b/frontend/client/src/autotest/afe/JobTable.java
index e2419a2..7f424fe 100644
--- a/frontend/client/src/autotest/afe/JobTable.java
+++ b/frontend/client/src/autotest/afe/JobTable.java
@@ -32,12 +32,6 @@
         row.put(HOSTS_SUMMARY, new JSONString(countString));
         
         // remove seconds from created time
-        JSONValue createdValue = row.get("created_on");
-        String created = "";
-        if (createdValue.isNull() == null) {
-            created = createdValue.isString().stringValue();
-            created = created.substring(0, created.length() - 3);
-        }
-        row.put(CREATED_TEXT, new JSONString(created));
+        AfeUtils.removeSecondsFromDateField(row, "created_on", CREATED_TEXT);
     }
 }
diff --git a/frontend/client/src/autotest/afe/RecurringTable.java b/frontend/client/src/autotest/afe/RecurringTable.java
new file mode 100644
index 0000000..eb240b0
--- /dev/null
+++ b/frontend/client/src/autotest/afe/RecurringTable.java
@@ -0,0 +1,42 @@
+package autotest.afe;
+
+import autotest.common.table.DynamicTable;
+import autotest.common.table.RpcDataSource;
+import autotest.common.table.DataSource.SortDirection;
+
+import com.google.gwt.json.client.JSONObject;
+import com.google.gwt.json.client.JSONString;
+import com.google.gwt.json.client.JSONValue;
+
+/**
+ * A table to display scheduled runs.
+ */
+public class RecurringTable extends DynamicTable {
+    public static final String START_DATE_TEXT = "start_date";
+
+    private static final String[][] RECURRING_COLUMNS = {
+                            { CLICKABLE_WIDGET_COLUMN, "Select" },
+                            { "job_id", "Job ID" },
+                            { "owner", "Owner" },
+                            { START_DATE_TEXT, "Recurring start" },
+                            { "loop_period", "Loop period" },
+                            { "loop_count", "Loop count" }};
+
+    public RecurringTable() {
+        super(RECURRING_COLUMNS, new RpcDataSource("get_recurring",
+                                                   "get_num_recurring"));
+        sortOnColumn("id", SortDirection.DESCENDING);
+    }
+    
+    @Override
+    protected void preprocessRow(JSONObject row) {
+        JSONObject job = row.get("job").isObject();
+        int jobId = (int) job.get("id").isNumber().doubleValue();
+        JSONObject owner = row.get("owner").isObject();
+        row.put("job_id", new JSONString(Integer.toString(jobId)));
+        row.put("owner", owner.get("login"));
+        // remove seconds from start_date
+        AfeUtils.removeSecondsFromDateField(row, "start_date", START_DATE_TEXT);
+        
+    }
+}
diff --git a/frontend/client/src/autotest/afe/RecurringView.java b/frontend/client/src/autotest/afe/RecurringView.java
new file mode 100644
index 0000000..7d20c0d
--- /dev/null
+++ b/frontend/client/src/autotest/afe/RecurringView.java
@@ -0,0 +1,268 @@
+package autotest.afe;
+
+import autotest.common.JsonRpcCallback;
+import autotest.common.JsonRpcProxy;
+import autotest.common.table.LinkSetFilter;
+import autotest.common.table.ListFilter;
+import autotest.common.table.SelectionManager;
+import autotest.common.table.TableDecorator;
+import autotest.common.table.DynamicTable.DynamicTableListener;
+import autotest.common.ui.ContextMenu;
+import autotest.common.ui.NotifyManager;
+import autotest.common.ui.Paginator;
+import autotest.common.ui.TabView;
+import autotest.common.ui.TableActionsPanel.TableActionsListener;
+
+import com.google.gwt.json.client.JSONArray;
+import com.google.gwt.json.client.JSONBoolean;
+import com.google.gwt.json.client.JSONObject;
+import com.google.gwt.json.client.JSONNumber;
+import com.google.gwt.json.client.JSONString;
+import com.google.gwt.json.client.JSONValue;
+import com.google.gwt.user.client.Command;
+import com.google.gwt.user.client.ui.HorizontalPanel;
+import com.google.gwt.user.client.ui.Hyperlink;
+import com.google.gwt.user.client.ui.RootPanel;
+import com.google.gwt.user.client.ui.TextBox;
+import com.google.gwt.user.client.ui.VerticalPanel;
+import com.google.gwt.user.client.ui.FlexTable;
+import com.google.gwt.user.client.ui.Label;
+import com.google.gwt.user.client.ui.HasAlignment;
+import com.google.gwt.user.client.ui.Button;
+import com.google.gwt.user.client.ui.ClickListener;
+import com.google.gwt.user.client.ui.Widget;
+import com.google.gwt.i18n.client.DateTimeFormat;
+
+import java.util.Set;
+import java.util.Date;
+
+public class RecurringView extends TabView implements TableActionsListener {
+    private static final int RECURRINGRUN_PER_PAGE = 30;
+    private static final int DEFAULT_LOOP_DELAY = 3600;
+    private static final int DEFAULT_LOOP_COUNT = 1;
+
+    private JSONObject jobFilterArgs = new JSONObject();
+    private RecurringSelectListener selectListener;
+    private RecurringTable recurringTable;
+    private TableDecorator tableDecorator;
+    private ListFilter ownerFilter;
+    private SelectionManager selectionManager;
+    private VerticalPanel createRecurringPanel;
+    private Label jobIdLbl = new Label("");
+    private TextBox startDate = new TextBox();
+    private TextBox loopDelay = new TextBox();
+    private TextBox loopCount = new TextBox();
+    private FlexTable createRecurTable;
+    
+    private JsonRpcProxy rpcProxy = JsonRpcProxy.getProxy();
+    
+    interface RecurringSelectListener {
+        public void onRecurringSelected(int schedId);
+    }
+
+    @Override
+    public String getElementId() {
+        return "recurring_list";
+    }
+
+    @Override
+    public void refresh() {
+        super.refresh();
+        recurringTable.refresh();
+    }
+
+    public RecurringView(RecurringSelectListener listener) {
+        selectListener = listener;
+    }
+    
+    @Override
+    public void initialize() {
+        recurringTable = new RecurringTable();
+        
+        recurringTable.setRowsPerPage(RECURRINGRUN_PER_PAGE);
+        recurringTable.setClickable(true);
+        recurringTable.addListener(new DynamicTableListener() {
+            public void onRowClicked(int rowIndex, JSONObject row) {
+                JSONObject job = row.get("job").isObject();
+                int jobId = (int) job.get("id").isNumber().doubleValue();
+                selectListener.onRecurringSelected(jobId);
+            }
+            
+            public void onTableRefreshed() {}
+        });
+
+        tableDecorator = new TableDecorator(recurringTable);
+        tableDecorator.addPaginators();
+        selectionManager = tableDecorator.addSelectionManager(false);
+        recurringTable.setWidgetFactory(selectionManager);
+        tableDecorator.addTableActionsPanel(this, true);
+        RootPanel.get("recurring_table").add(tableDecorator);
+
+
+        ownerFilter = AfeUtils.getUserFilter("owner__login");
+        recurringTable.addFilter(ownerFilter);
+        RootPanel.get("recurring_user_list").add(ownerFilter.getWidget());
+
+        initRecurringPanel();
+
+        RootPanel.get("recurring_create_panel").add(createRecurringPanel);
+    }
+
+    public ContextMenu getActionMenu() {
+        ContextMenu menu = new ContextMenu();
+        menu.addItem("Remove recurring runs", new Command() {
+            public void execute() {
+                removeSelectedRecurring();
+            }
+        });
+        return menu;
+    }
+
+    private void initRecurringPanel() {
+        createRecurTable = new FlexTable();
+
+        Label createLbl = new Label("Creating recurring job");
+        Button createBtn = new Button("Create recurring job");
+        Button resetBtn = new Button("Reset");
+        Button cancelBtn = new Button("Cancel");
+
+        createRecurringPanel = new VerticalPanel();
+        createRecurringPanel.setVisible(false);
+
+        createLbl.setStyleName("title");
+        createLbl.setHorizontalAlignment(HasAlignment.ALIGN_CENTER);
+
+        setCreateTableRow(0, "Template Job Id:", jobIdLbl);
+        setCreateTableRow(1, "Start time (on server):", startDate);
+        setCreateTableRow(2, "Loop delay (in sec.):", loopDelay);
+        setCreateTableRow(3, "Loop count:", loopCount);
+
+        createRecurTable.setWidget(4, 0, createBtn);
+        createRecurTable.setWidget(4, 1, resetBtn);
+        createRecurTable.setWidget(4, 2, cancelBtn);
+
+        createRecurringPanel.add(createLbl);
+        createRecurringPanel.add(createRecurTable);
+
+        resetBtn.addClickListener(new ClickListener() {
+            public void onClick(Widget sender) {
+                resetCreate();
+            }
+        });
+
+        createBtn.addClickListener(new ClickListener() {
+            public void onClick(Widget sender) {
+                submitRecurringJob();
+            }
+        });
+
+        cancelBtn.addClickListener(new ClickListener() {
+            public void onClick(Widget sender) {
+                createRecurringPanel.setVisible(false);
+            }
+        });
+
+    }
+
+    private void setCreateTableRow(int row, String name, Widget control) {
+        createRecurTable.setText(row, 0, name);
+        createRecurTable.setWidget(row, 1, control);
+        createRecurTable.getFlexCellFormatter().setStyleName(row, 0, "field-name");
+    }
+
+    public void createRecurringJob(int jobId) {
+        createRecurringPanel.setVisible(true);
+        jobIdLbl.setText(Integer.toString(jobId));
+        resetCreate();
+    }
+
+    private void submitRecurringJob() {
+        final int delayValue, countValue;
+        try {
+            delayValue = AfeUtils.parsePositiveIntegerInput(loopDelay.getText(),
+                                                            "loop delay");
+            countValue = AfeUtils.parsePositiveIntegerInput(loopCount.getText(),
+                                                            "loop count");
+            checkDate();
+        } catch (IllegalArgumentException exc) {
+            return;
+        }
+
+        JSONObject args = new JSONObject();
+        args.put("job_id", new JSONNumber(Integer.parseInt(jobIdLbl.getText())));
+        args.put("start_date", new JSONString(startDate.getText()));
+        args.put("loop_period", new JSONNumber(delayValue));
+        args.put("loop_count", new JSONNumber(countValue));
+
+        rpcProxy.rpcCall("create_recurring_run", args, new JsonRpcCallback() {
+            @Override
+            public void onSuccess(JSONValue result) {
+                int id = (int) result.isNumber().doubleValue();
+                createRecurringPanel.setVisible(false);
+                NotifyManager.getInstance().showMessage("Recurring run " +
+                                                        Integer.toString(id) +
+                                                        " created");
+                refresh();
+            }
+        });
+    }
+
+    private void resetCreate() {
+        getServerTime();
+        loopDelay.setText(Integer.toString(DEFAULT_LOOP_DELAY));
+        loopCount.setText(Integer.toString(DEFAULT_LOOP_COUNT));
+    }
+
+    private void getServerTime() {
+        rpcProxy.rpcCall("get_server_time", null, new JsonRpcCallback() {
+            @Override
+            public void onSuccess(JSONValue result) {
+                String sTime = result.isString().stringValue();
+                startDate.setText(sTime);
+            }
+        });
+    }
+
+    private void checkDate() {
+        try {
+            DateTimeFormat fmt = DateTimeFormat.getFormat("yyyy-MM-dd HH:mm");
+            fmt.parse(startDate.getText());
+        }
+        catch (IllegalArgumentException exc) {
+            String error = "Please enter a correct date/time " +
+                           "format: yyyy-MM-dd HH:mm";
+            NotifyManager.getInstance().showError(error);
+            throw new IllegalArgumentException();
+        }
+    }
+
+    private void removeSelectedRecurring() {
+        Set<JSONObject> selectedSet = selectionManager.getSelectedObjects();
+        if (selectedSet.isEmpty()) {
+            NotifyManager.getInstance().showError("No recurring run selected");
+            return;
+        }
+
+        JSONArray ids = new JSONArray();
+        for(JSONObject jsonObj : selectedSet) {
+            ids.set(ids.size(), jsonObj.get("id"));
+        }
+
+        JSONObject params = new JSONObject();
+        params.put("id__in", ids);
+        callRemove(params);
+    }
+
+    private void callRemove(JSONObject params) {
+        JsonRpcProxy rpcProxy = JsonRpcProxy.getProxy();
+        rpcProxy.rpcCall("delete_recurring_runs", params,
+                         new JsonRpcCallback() {
+            @Override
+            public void onSuccess(JSONValue result) {
+                NotifyManager.getInstance().showMessage("Recurring runs " +
+                                                        "removed");
+                refresh();
+            }
+        });
+    }
+}
diff --git a/frontend/client/src/autotest/public/AfeClient.html b/frontend/client/src/autotest/public/AfeClient.html
index 1efd3ce..1476373 100644
--- a/frontend/client/src/autotest/public/AfeClient.html
+++ b/frontend/client/src/autotest/public/AfeClient.html
@@ -38,13 +38,20 @@
         <div id="job_table"></div>
       </div>
 
+      <div id="recurring_list" title="Recurring Job">
+        View recurring jobs for user: <span id="recurring_user_list"></span>
+        <div id="recurring_table"></div>
+        <div id="recurring_create_panel" class="box" width="0"></div>
+      </div>
+
       <div id="view_job"  title="View Job">
         <span id="job_id_fetch" class="box-full">Fetch job by ID:
           <span id="job_id_fetch_controls"></span>
         </span><br><br>
         <div id="view_title" class="title"></div><br>
         <div id="view_data">
-          <span id="view_abort"></span><span id="view_clone"></span><br>
+          <span id="view_abort"></span><span id="view_clone"></span>
+          <span id="view_recurring"></span><br>
           <span class="field-name">Label:</span>
           <span id="view_label"></span><br>
           <span class="field-name">Owner:</span>
@@ -147,7 +154,8 @@
           </tr>
           <tr>
             <td id="create_submit"></td>
-            <td colspan="2" id="create_reset"></td>
+            <td id="create_template_job"></td>
+            <td id="create_reset"></td>
           </tr>
         </table>
       </div>
diff --git a/frontend/migrations/032_add_recurring_run.py b/frontend/migrations/032_add_recurring_run.py
new file mode 100644
index 0000000..08fa2c0
--- /dev/null
+++ b/frontend/migrations/032_add_recurring_run.py
@@ -0,0 +1,23 @@
+def migrate_up(manager):
+    manager.execute_script(CREATE_TABLE)
+
+def migrate_down(manager):
+    manager.execute_script(DROP_TABLE)
+
+CREATE_TABLE = """\
+CREATE TABLE `recurring_run` (
+    `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
+    `job_id` integer NOT NULL REFERENCES `jobs` (`id`),
+    `owner_id` integer NOT NULL REFERENCES `users` (`id`),
+    `start_date` datetime NOT NULL,
+    `loop_period` integer NOT NULL,
+    `loop_count` integer NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+CREATE INDEX recurring_run_job_id ON `recurring_run` (`job_id`);
+CREATE INDEX recurring_run_owner_id ON `recurring_run` (`owner_id`);
+"""
+
+DROP_TABLE = """\
+DROP INDEX recurring_run_job_id ON `recurring_run`;
+DROP TABLE IF EXISTS `recurring_run`;
+"""