# pylint: disable-msg=C0111

"""\
Functions to expose over the RPC interface.

For all modify* and delete* functions that ask for an 'id' parameter to
identify the object to operate on, the id may be either
 * the database row ID
 * the name of the object (label name, hostname, user login, etc.)
 * a dictionary containing a uniquely identifying field (this option should
   seldom be used)

When specifying foreign key fields (i.e. adding hosts to a label, or adding
users to an ACL group), the given value may be either the database row ID or the
name of the object.

All get* functions return lists of dictionaries.  Each dictionary represents one
object and maps field names to values.

Some examples:
modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
modify_test('sleeptest', test_type='Client', params=', seconds=60')
delete_acl_group(1) # delete by ID
delete_acl_group('Everyone') # delete by name
acl_group_add_users('Everyone', ['mbligh', 'showard'])
get_jobs(owner='showard', status='Queued')

See doctests/001_rpc_test.txt for (lots) more examples.
"""

__author__ = 'showard@google.com (Steve Howard)'

import sys
import datetime

from django.db.models import Count
import common
from autotest_lib.client.common_lib import priorities
from autotest_lib.client.common_lib.cros.graphite import autotest_stats
from autotest_lib.frontend.afe import control_file, rpc_utils
from autotest_lib.frontend.afe import models, model_logic, model_attributes
from autotest_lib.frontend.afe import site_rpc_interface
from autotest_lib.frontend.tko import models as tko_models
from autotest_lib.frontend.tko import rpc_interface as tko_rpc_interface
from autotest_lib.server import frontend
from autotest_lib.server import utils
from autotest_lib.server.cros.dynamic_suite import tools
from autotest_lib.site_utils import status_history


_timer = autotest_stats.Timer('rpc_interface')

def get_parameterized_autoupdate_image_url(job):
    """Get the parameterized autoupdate image url from a parameterized job."""
    known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
    image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
                                                           name='image')
    para_set = job.parameterized_job.parameterizedjobparameter_set
    job_test_para = para_set.get(test_parameter=image_parameter)
    return job_test_para.parameter_value


# labels

def modify_label(id, **data):
    models.Label.smart_get(id).update_object(data)


def delete_label(id):
    models.Label.smart_get(id).delete()


def add_label(name, ignore_exception_if_exists=False, **kwargs):
    """Adds a new label of a given name.

    @param name: label name.
    @param ignore_exception_if_exists: If True and the exception was
        thrown due to the duplicated label name when adding a label,
        then suppress the exception. Default is False.
    @param kwargs: keyword args that store more info about a label
        other than the name.
    @return: int/long id of a new label.
    """
    # models.Label.add_object() throws model_logic.ValidationError
    # when it is given a label name that already exists.
    # However, ValidationError can be thrown with different errors,
    # and those errors should be thrown up to the call chain.
    try:
        label = models.Label.add_object(name=name, **kwargs)
    except:
        exc_info = sys.exc_info()
        if ignore_exception_if_exists:
            label = rpc_utils.get_label(name)
            # If the exception is raised not because of duplicated
            # "name", then raise the original exception.
            if label is None:
                raise exc_info[0], exc_info[1], exc_info[2]
        else:
            raise exc_info[0], exc_info[1], exc_info[2]
    return label.id


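# Example (illustrative sketch, not part of the original module): creating a
# label via add_label() and tolerating a pre-existing label of the same name.
# The label name used here is hypothetical.
#
#     label_id = add_label('pool:suites', ignore_exception_if_exists=True)
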
def add_label_to_hosts(id, hosts):
    """Adds a label of the given id to the given hosts only in local DB.

    @param id: id or name of a label. More often a label name.
    @param hosts: The hostnames of hosts that need the label.

    @raises models.Label.DoesNotExist: If the label with id doesn't exist.
    """
    label = models.Label.smart_get(id)
    host_objs = models.Host.smart_get_bulk(hosts)
    if label.platform:
        models.Host.check_no_platform(host_objs)
    label.host_set.add(*host_objs)


@rpc_utils.route_rpc_to_master
def label_add_hosts(id, hosts):
    """Adds a label with the given id to the given hosts.

    This method should be run only on the master, not on shards.
    The given label will be created if it doesn't exist, provided the `id`
    supplied is a label name, not an int/long id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If the id specified is an int/long (label id)
                        while the label does not exist.
    """
    try:
        label = models.Label.smart_get(id)
    except models.Label.DoesNotExist:
        # This matches the type checks in smart_get, which is a hack
        # in and of itself. The aim here is to create any non-existent
        # label, which we cannot do if the 'id' specified isn't a label name.
        if isinstance(id, basestring):
            label = models.Label.smart_get(add_label(id))
        else:
            raise ValueError('Label id (%s) does not exist. Please specify '
                             'the argument, id, as a string (label name).'
                             % id)

    host_objs = models.Host.smart_get_bulk(hosts)
    # Make sure the label exists on the shard with the same id
    # as it is on the master.
    # It is possible that the label already exists on a shard because
    # we add a new label only to the shards of the hosts to which the
    # label is being attached.
    # For example, we add a label L1 to a host in shard S1.
    # Master and S1 will have L1 but other shards won't.
    # Later, when we add the same label L1 to hosts in shards S1 and S2,
    # S1 already has the label but S2 doesn't.
    # S2 should get the new label without any problem.
    # We ignore the exception in such a case.
    rpc_utils.fanout_rpc(
            host_objs, 'add_label', name=label.name, id=label.id,
            include_hostnames=False, ignore_exception_if_exists=True)
    rpc_utils.fanout_rpc(host_objs, 'add_label_to_hosts', id=id)

    add_label_to_hosts(id, hosts)


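# Example (illustrative sketch, not part of the original module): attaching a
# label to hosts by name. The label is created on the master if it does not
# exist and the change is fanned out to the relevant shards. The label name
# and hostnames are hypothetical.
#
#     label_add_hosts(id='pool:suites', hosts=['host1', 'host2'])
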
def remove_label_from_hosts(id, hosts):
    """Removes a label of the given id from the given hosts only in local DB.

    @param id: id or name of a label.
    @param hosts: The hostnames of hosts from which to remove the label.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    models.Label.smart_get(id).host_set.remove(*host_objs)


@rpc_utils.route_rpc_to_master
def label_remove_hosts(id, hosts):
    """Removes a label of the given id from the given hosts.

    This method should be run only on the master, not on shards.

    @param id: id or name of a label.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(host_objs, 'remove_label_from_hosts', id=id)

    remove_label_from_hosts(id, hosts)


def get_labels(exclude_filters=(), **filter_data):
    """\
    @param exclude_filters: A sequence of dictionaries of filters.

    @returns A sequence of nested dictionaries of label information.
    """
    labels = models.Label.query_objects(filter_data)
    for exclude_filter in exclude_filters:
        labels = labels.exclude(**exclude_filter)
    return rpc_utils.prepare_rows_as_nested_dicts(labels, ('atomic_group',))


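# Example (illustrative sketch, not part of the original module): listing all
# labels except platform labels by passing an exclude filter, mirroring the
# Django-style keyword filtering used throughout this module.
#
#     non_platform_labels = get_labels(exclude_filters=({'platform': True},))
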
# atomic groups

def add_atomic_group(name, max_number_of_machines=None, description=None):
    return models.AtomicGroup.add_object(
            name=name, max_number_of_machines=max_number_of_machines,
            description=description).id


def modify_atomic_group(id, **data):
    models.AtomicGroup.smart_get(id).update_object(data)


def delete_atomic_group(id):
    models.AtomicGroup.smart_get(id).delete()


def atomic_group_add_labels(id, labels):
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.add(*label_objs)


def atomic_group_remove_labels(id, labels):
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.remove(*label_objs)


def get_atomic_groups(**filter_data):
    return rpc_utils.prepare_for_serialization(
            models.AtomicGroup.list_objects(filter_data))


# hosts

def add_host(hostname, status=None, locked=None, lock_reason='', protection=None):
    if locked and not lock_reason:
        raise model_logic.ValidationError(
            {'locked': 'Please provide a reason for locking when adding host.'})

    return models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, lock_reason=lock_reason,
                                  protection=protection).id


@rpc_utils.forward_single_host_rpc_to_shard
def modify_host(id, **data):
    """Modify local attributes of a host.

    If this is called on the master, but the host is assigned to a shard, this
    will also forward the call to the responsible shard. For example, if a
    host is locked using this function, the change also propagates to its
    shard.

    @param id: id of the host to modify.
    @param **data: key=value pairs of values to set on the host.
    """
    rpc_utils.check_modify_host(data)
    host = models.Host.smart_get(id)

    rpc_utils.check_modify_host_locking(host, data)
    host.update_object(data)


def modify_hosts(host_filter_data, update_data):
    """Modify local attributes of multiple hosts.

    If this is called on the master, but one of the hosts that match the
    filters is assigned to a shard, this will also forward the call to the
    responsible shard.

    The filters are always applied on the master, not on the shards. This means
    that if the state of a host differs on the master and a shard, the state on
    the master will be used. For example:
    A host was synced to Shard 1. On Shard 1 the status of the host was set to
    'Repair Failed'.
    - A call to modify_hosts with host_filter_data={'status': 'Ready'} will
      update the host (both on the shard and on the master), because the state
      of the host as the master knows it is still 'Ready'.
    - A call to modify_hosts with host_filter_data={'status': 'Repair Failed'}
      will not update the host, because the filter doesn't apply on the master.

    @param host_filter_data: Filters that select which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)

    affected_shard_hostnames = set()
    affected_host_ids = []

    # Check all hosts before changing data for exception safety.
    for host in hosts:
        rpc_utils.check_modify_host_locking(host, update_data)
        if host.shard:
            affected_shard_hostnames.add(host.shard.rpc_hostname())
            affected_host_ids.append(host.id)

    if not utils.is_shard():
        # Caution: Changing the filter from the original here. See docstring.
        rpc_utils.run_rpc_on_multiple_hostnames(
                'modify_hosts', affected_shard_hostnames,
                host_filter_data={'id__in': affected_host_ids},
                update_data=update_data)

    for host in hosts:
        host.update_object(update_data)


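# Example (illustrative sketch, not part of the original module): locking every
# host that the master currently considers 'Ready'. The filter is evaluated on
# the master and the update is forwarded to any affected shards; the lock
# reason field is assumed to be accepted as part of update_data.
#
#     modify_hosts(host_filter_data={'status': 'Ready'},
#                  update_data={'locked': True, 'lock_reason': 'maintenance'})
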
def add_labels_to_host(id, labels):
    """Adds labels to a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.Host.smart_get(id).labels.add(*label_objs)


@rpc_utils.route_rpc_to_master
def host_add_labels(id, labels):
    """Adds labels to a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.

    @raises ValidationError: If adding more than one platform label.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    platforms = [label.name for label in label_objs if label.platform]
    if len(platforms) > 1:
        raise model_logic.ValidationError(
                {'labels': 'Adding more than one platform label: %s' %
                           ', '.join(platforms)})

    host_obj = models.Host.smart_get(id)
    if len(platforms) == 1:
        models.Host.check_no_platform([host_obj])

    rpc_utils.fanout_rpc([host_obj], 'add_labels_to_host', False,
                         id=id, labels=labels)
    add_labels_to_host(id, labels)


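# Example (illustrative sketch, not part of the original module): adding
# non-platform labels to a single host by hostname. The label names and the
# hostname are hypothetical.
#
#     host_add_labels('host1', ['bluetooth', 'wifi'])
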
def remove_labels_from_host(id, labels):
    """Removes labels from a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.Host.smart_get(id).labels.remove(*label_objs)


@rpc_utils.route_rpc_to_master
def host_remove_labels(id, labels):
    """Removes labels from a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    host_obj = models.Host.smart_get(id)
    rpc_utils.fanout_rpc([host_obj], 'remove_labels_from_host', False,
                         id=id, labels=labels)
    remove_labels_from_host(id, labels)


def get_host_attribute(attribute, **host_filter_data):
    """
    @param attribute: string name of attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
                             act upon
    """
    hosts = rpc_utils.get_host_query((), False, False, True, host_filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_attr_dicts = []
    for host_obj in hosts:
        for attr_obj in host_obj.attribute_list:
            if attr_obj.attribute == attribute:
                host_attr_dicts.append(attr_obj.get_object_dict())
    return rpc_utils.prepare_for_serialization(host_attr_dicts)


def set_host_attribute(attribute, value, **host_filter_data):
    """
    @param attribute: string name of attribute
    @param value: string, or None to delete an attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
                             act upon
    """
    assert host_filter_data  # disallow accidental actions on all hosts
    hosts = models.Host.query_objects(host_filter_data)
    models.AclGroup.check_for_acl_violation_hosts(hosts)

    # Master forwards this RPC to shards.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(hosts, 'set_host_attribute', False,
                attribute=attribute, value=value, **host_filter_data)

    for host in hosts:
        host.set_or_delete_attribute(attribute, value)


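# Example (illustrative sketch, not part of the original module): setting and
# then clearing an attribute on one host. Passing value=None deletes the
# attribute. The attribute name, URL, and hostname are hypothetical.
#
#     set_host_attribute('job_repo_url', 'http://devserver/static/some-build',
#                        hostname='host1')
#     set_host_attribute('job_repo_url', None, hostname='host1')
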
@rpc_utils.forward_single_host_rpc_to_shard
def delete_host(id):
    models.Host.smart_get(id).delete()


def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True, **filter_data):
    """
    @param multiple_labels: match hosts in all of the labels given. Should
            be a list of label names.
    @param exclude_only_if_needed_labels: Exclude hosts with at least one
            "only_if_needed" label applied.
    @param exclude_atomic_group_hosts: Exclude hosts that have one or more
            atomic group labels associated with them.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.Label,
                                                'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                                'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                                'attribute_list')
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
                find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)


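# Example (illustrative sketch, not part of the original module): fetching
# unlocked hosts carrying a given label and reading the fields that get_hosts()
# adds on top of the raw Host row ('labels', 'platform', 'atomic_group',
# 'acls', 'attributes'). The label name is hypothetical.
#
#     for host in get_hosts(multiple_labels=['board:link'], locked=False):
#         print host['hostname'], host['labels'], host['attributes']
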
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  exclude_atomic_group_hosts=False, valid_only=True,
                  **filter_data):
    """
    Same parameters as get_hosts().

    @returns The number of matching hosts.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    return hosts.count()


# tests

def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    return models.Test.add_object(name=name, test_type=test_type, path=path,
                                  author=author, dependencies=dependencies,
                                  experimental=experimental,
                                  run_verify=run_verify, test_time=test_time,
                                  test_category=test_category,
                                  sync_count=sync_count,
                                  test_class=test_class,
                                  description=description).id


def modify_test(id, **data):
    models.Test.smart_get(id).update_object(data)


def delete_test(id):
    models.Test.smart_get(id).delete()


def get_tests(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Test.list_objects(filter_data))


@_timer.decorate
def get_tests_status_counts_by_job_name_label(job_name_prefix, label_name):
    """Gets the counts of all passed and failed tests from the matching jobs.

    @param job_name_prefix: Name prefix of the jobs to get the summary from,
            e.g., 'butterfly-release/R40-6457.21.0/bvt-cq/'.
    @param label_name: Label that must be set in the jobs, e.g.,
            'cros-version:butterfly-release/R40-6457.21.0'.

    @returns A summary of the counts of all the passed and failed tests.
    """
    job_ids = list(models.Job.objects.filter(
            name__startswith=job_name_prefix,
            dependency_labels__name=label_name).values_list(
            'pk', flat=True))
    summary = {'passed': 0, 'failed': 0}
    if not job_ids:
        return summary

    counts = (tko_models.TestView.objects.filter(
            afe_job_id__in=job_ids).exclude(
            test_name='SERVER_JOB').exclude(
            test_name__startswith='CLIENT_JOB').values(
            'status').annotate(
            count=Count('status')))
    for status in counts:
        if status['status'] == 'GOOD':
            summary['passed'] += status['count']
        else:
            summary['failed'] += status['count']
    return summary


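# Example (illustrative sketch, not part of the original module): summarizing
# pass/fail counts for one build's bvt-cq jobs, using prefix and label values
# in the format shown in the docstring above.
#
#     counts = get_tests_status_counts_by_job_name_label(
#             'butterfly-release/R40-6457.21.0/bvt-cq/',
#             'cros-version:butterfly-release/R40-6457.21.0')
#     # counts -> {'passed': <int>, 'failed': <int>}
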
# profilers

def add_profiler(name, description=None):
    return models.Profiler.add_object(name=name, description=description).id


def modify_profiler(id, **data):
    models.Profiler.smart_get(id).update_object(data)


def delete_profiler(id):
    models.Profiler.smart_get(id).delete()


def get_profilers(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Profiler.list_objects(filter_data))


# users

def add_user(login, access_level=None):
    return models.User.add_object(login=login, access_level=access_level).id


def modify_user(id, **data):
    models.User.smart_get(id).update_object(data)


def delete_user(id):
    models.User.smart_get(id).delete()


def get_users(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.User.list_objects(filter_data))


# acl groups

def add_acl_group(name, description=None):
    group = models.AclGroup.add_object(name=name, description=description)
    group.users.add(models.User.current_user())
    return group.id


def modify_acl_group(id, **data):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    group.update_object(data)
    group.add_current_user_if_empty()


def acl_group_add_users(id, users):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    users = models.User.smart_get_bulk(users)
    group.users.add(*users)


def acl_group_remove_users(id, users):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    users = models.User.smart_get_bulk(users)
    group.users.remove(*users)
    group.add_current_user_if_empty()


def acl_group_add_hosts(id, hosts):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    hosts = models.Host.smart_get_bulk(hosts)
    group.hosts.add(*hosts)
    group.on_host_membership_change()


def acl_group_remove_hosts(id, hosts):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    hosts = models.Host.smart_get_bulk(hosts)
    group.hosts.remove(*hosts)
    group.on_host_membership_change()


def delete_acl_group(id):
    models.AclGroup.smart_get(id).delete()


def get_acl_groups(**filter_data):
    acl_groups = models.AclGroup.list_objects(filter_data)
    for acl_group in acl_groups:
        acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
        acl_group['users'] = [user.login
                              for user in acl_group_obj.users.all()]
        acl_group['hosts'] = [host.hostname
                              for host in acl_group_obj.hosts.all()]
    return rpc_utils.prepare_for_serialization(acl_groups)


# jobs

def generate_control_file(tests=(), kernel=None, label=None, profilers=(),
                          client_control_file='', use_container=False,
                          profile_only=None, upload_kernel_config=False):
    """
    Generates a client-side control file to load a kernel and run tests.

    @param tests List of tests to run.
    @param kernel A list of kernel info dictionaries configuring which kernels
        to boot for this job and other options for them
    @param label Name of label to grab kernel config from.
    @param profilers List of profilers to activate during the job.
    @param client_control_file The contents of a client-side control file to
        run at the end of all tests.  If this is supplied, all tests must be
        client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel.  That'll require changing the parameter
        name and adding a boolean to indicate if it is a client or server
        control file.
    @param use_container unused argument today.  TODO: Enable containers
        on the host during a client side test.
    @param profile_only A boolean that indicates what default profile_only
        mode to use in the control file. Passing None will generate a
        control file that does not explicitly set the default mode at all.
    @param upload_kernel_config: if enabled it will generate server control
            file code that uploads the kernel config file to the client and
            tells the client of the new (local) path when compiling the kernel;
            the tests must be server side tests

    @returns a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
            synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    if not tests and not client_control_file:
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])

    cf_info, test_objects, profiler_objects, label = (
        rpc_utils.prepare_generate_control_file(tests, kernel, label,
                                                profilers))
    cf_info['control_file'] = control_file.generate_control(
        tests=test_objects, kernels=kernel, platform=label,
        profilers=profiler_objects, is_server=cf_info['is_server'],
        client_control_file=client_control_file, profile_only=profile_only,
        upload_kernel_config=upload_kernel_config)
    return cf_info


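# Example (illustrative sketch, not part of the original module): generating a
# client-side control file for two classic client tests with a profiler. The
# returned dict carries the control file text plus scheduling metadata.
#
#     cf_info = generate_control_file(tests=['sleeptest', 'dbench'],
#                                     profilers=['oprofile'])
#     control_text = cf_info['control_file']
#     is_server = cf_info['is_server']
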
def create_parameterized_job(name, priority, test, parameters, kernel=None,
                             label=None, profilers=(), profiler_parameters=None,
                             use_container=False, profile_only=None,
                             upload_kernel_config=False, hosts=(),
                             meta_hosts=(), one_time_hosts=(),
                             atomic_group_name=None, synch_count=None,
                             is_template=False, timeout=None,
                             timeout_mins=None, max_runtime_mins=None,
                             run_verify=False, email_list='', dependencies=(),
                             reboot_before=None, reboot_after=None,
                             parse_failed_repair=None, hostless=False,
                             keyvals=None, drone_set=None, run_reset=True,
                             require_ssp=None):
    """
    Creates and enqueues a parameterized job.

    Most parameters are a combination of the parameters for
    generate_control_file() and create_job(), with the exception of:

    @param test name or ID of the test to run
    @param parameters a map of parameter name ->
                          tuple of (param value, param type)
    @param profiler_parameters a dictionary of parameters for the profilers:
                                   key: profiler name
                                   value: dict of param name -> tuple of
                                                                (param value,
                                                                 param type)
    """
    # Save the values of the passed arguments here. What we're going to do with
    # them is pass them all to rpc_utils.get_create_job_common_args(), which
    # will extract the subset of these arguments that apply for
    # rpc_utils.create_job_common(), which we then pass in to that function.
    args = locals()

    # Set up the parameterized job configs
    test_obj = models.Test.smart_get(test)
    control_type = test_obj.test_type

    try:
        label = models.Label.smart_get(label)
    except models.Label.DoesNotExist:
        label = None

    kernel_objs = models.Kernel.create_kernels(kernel)
    profiler_objs = [models.Profiler.smart_get(profiler)
                     for profiler in profilers]

    parameterized_job = models.ParameterizedJob.objects.create(
            test=test_obj, label=label, use_container=use_container,
            profile_only=profile_only,
            upload_kernel_config=upload_kernel_config)
    parameterized_job.kernels.add(*kernel_objs)

    for profiler in profiler_objs:
        parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
                parameterized_job=parameterized_job,
                profiler=profiler)
        profiler_params = profiler_parameters.get(profiler.name, {})
        for name, (value, param_type) in profiler_params.iteritems():
            models.ParameterizedJobProfilerParameter.objects.create(
                    parameterized_job_profiler=parameterized_profiler,
                    parameter_name=name,
                    parameter_value=value,
                    parameter_type=param_type)

    try:
        for parameter in test_obj.testparameter_set.all():
            if parameter.name in parameters:
                param_value, param_type = parameters.pop(parameter.name)
                parameterized_job.parameterizedjobparameter_set.create(
                        test_parameter=parameter, parameter_value=param_value,
                        parameter_type=param_type)

        if parameters:
            raise Exception('Extra parameters remain: %r' % parameters)

        return rpc_utils.create_job_common(
                parameterized_job=parameterized_job.id,
                control_type=control_type,
                **rpc_utils.get_create_job_common_args(args))
    except:
        parameterized_job.delete()
        raise


def create_job_page_handler(name, priority, control_file, control_type,
                            image=None, hostless=False, **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param kwargs extra args that will be required by create_suite_job or
                  create_job.

    @returns The created Job id number.
    """
    control_file = rpc_utils.encode_ascii(control_file)
    if not control_file:
        raise model_logic.ValidationError({
                'control_file' : "Control file cannot be empty"})

    if image and hostless:
        return site_rpc_interface.create_suite_job(
                name=name, control_file=control_file, priority=priority,
                build=image, **kwargs)
    return create_job(name, priority, control_file, control_type, image=image,
                      hostless=hostless, **kwargs)


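# Example (illustrative sketch, not part of the original module): the frontend
# create-job path. With both image and hostless set, the request is routed to
# site_rpc_interface.create_suite_job; otherwise it falls through to
# create_job(). The job name, build name, and control_text variable are
# hypothetical; priority is an integer, higher meaning more important.
#
#     job_id = create_job_page_handler(
#             name='dummy_suite', priority=10,
#             control_file=control_text, control_type='Server',
#             image='lumpy-release/R40-6457.21.0', hostless=True)
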
def create_job(name, priority, control_file, control_type,
               hosts=(), meta_hosts=(), one_time_hosts=(),
               atomic_group_name=None, synch_count=None, is_template=False,
               timeout=None, timeout_mins=None, max_runtime_mins=None,
               run_verify=False, email_list='', dependencies=(),
               reboot_before=None, reboot_after=None, parse_failed_repair=None,
               hostless=False, keyvals=None, drone_set=None, image=None,
               parent_job_id=None, test_retry=0, run_reset=True,
               require_ssp=None, args=(), **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
            synch_count == 1 means the job is asynchronous. If an atomic group
            is given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param timeout_mins Minutes after this call returns until the job times
            out.
    @param max_runtime_mins Minutes from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
            this job will be parsed as part of the job.
    @param hostless if true, create a hostless job
    @param keyvals dict of keyvals to associate with the job
    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
            one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.
    @param drone_set The name of the drone set to run this test on.
    @param image OS image to install before running job.
    @param parent_job_id id of a job considered to be parent of created job.
    @param test_retry Number of times to retry test if the test did not
            complete successfully. (optional, default: 0)
    @param run_reset Should the host be reset before running the test?
    @param require_ssp Set to True to require server-side packaging to run the
                       test. If it's set to None, drone will still try to run
                       the server side with server-side packaging. If the
                       autotest-server package doesn't exist for the build or
                       image is not set, drone will run the test without server-
                       side packaging. Default is None.
    @param args A list of args to be injected into control file.
    @param kwargs extra keyword args. NOT USED.

    @returns The created Job id number.
    """
    if args:
        control_file = tools.inject_vars({'args': args}, control_file)

    if image is None:
        return rpc_utils.create_job_common(
                **rpc_utils.get_create_job_common_args(locals()))

    # When image is supplied use a known parameterized test already in the
    # database to pass the OS image path from the front end, through the
    # scheduler, and finally to autoserv as the --image parameter.

    # The test autoupdate_ParameterizedJob is in afe_autotests and used to
    # instantiate a Test object and from there a ParameterizedJob.
    known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
    known_parameterized_job = models.ParameterizedJob.objects.create(
            test=known_test_obj)

    # autoupdate_ParameterizedJob has a single parameter, the image parameter,
    # stored in the table afe_test_parameters. We retrieve and set this
    # instance of the parameter to the OS image path.
    image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
                                                           name='image')
    known_parameterized_job.parameterizedjobparameter_set.create(
            test_parameter=image_parameter, parameter_value=image,
            parameter_type='string')

    # By passing a parameterized_job to create_job_common the job entry in
    # the afe_jobs table will have the field parameterized_job_id set.
    # The scheduler uses this id in the afe_parameterized_jobs table to
    # match this job to our known test, and then with the
    # afe_parameterized_job_parameters table to get the actual image path.
    return rpc_utils.create_job_common(
            parameterized_job=known_parameterized_job.id,
            **rpc_utils.get_create_job_common_args(locals()))


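# Example (illustrative sketch, not part of the original module): scheduling an
# asynchronous client-side job on one known host plus one metahost drawn from a
# label. The hostname, label name, and control_text variable are hypothetical.
#
#     job_id = create_job(name='sleeptest-run', priority=10,
#                         control_file=control_text, control_type='Client',
#                         hosts=['host1'], meta_hosts=['board:link'],
#                         synch_count=1)
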
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 876 | def abort_host_queue_entries(**filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 877 | """\ |
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 878 | Abort a set of host queue entries. |
Fang Deng | 63b0e45 | 2014-12-19 14:38:15 -0800 | [diff] [blame] | 879 | |
| 880 | @return: A list of dictionaries, each contains information |
| 881 | about an aborted HQE. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 882 | """ |
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 883 | query = models.HostQueueEntry.query_objects(filter_data) |
beeps | faecbce | 2013-10-29 11:35:10 -0700 | [diff] [blame] | 884 | |
| 885 | # Don't allow aborts on:
| 886 | # 1. Jobs that have already completed (whether or not they were aborted)
| 887 | # 2. Jobs that have already been aborted (but may not have completed)
| 888 | query = query.filter(complete=False).filter(aborted=False) |
showard | dc81751 | 2008-11-12 18:16:41 +0000 | [diff] [blame] | 889 | models.AclGroup.check_abort_permissions(query) |
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 890 | host_queue_entries = list(query.select_related()) |
showard | 2bab8f4 | 2008-11-12 18:15:22 +0000 | [diff] [blame] | 891 | rpc_utils.check_abort_synchronous_jobs(host_queue_entries) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 892 | |
Simran Basi | c1b2676 | 2013-06-26 14:23:21 -0700 | [diff] [blame] | 893 | models.HostQueueEntry.abort_host_queue_entries(host_queue_entries) |
Fang Deng | 63b0e45 | 2014-12-19 14:38:15 -0800 | [diff] [blame] | 894 | hqe_info = [{'HostQueueEntry': hqe.id, 'Job': hqe.job_id, |
| 895 | 'Job name': hqe.job.name} for hqe in host_queue_entries] |
| 896 | return hqe_info |
showard | 9d821ab | 2008-07-11 16:54:29 +0000 | [diff] [blame] | 897 | |
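# Example usage (illustrative only; the job id 42 is hypothetical). The filter
# keywords are the usual Django-style arguments accepted by query_objects(),
# and the return value is a list of dicts keyed by 'HostQueueEntry', 'Job' and
# 'Job name', as built above:
#
#     aborted = abort_host_queue_entries(job__id=42)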
| 898 | |
beeps | 8bb1f7d | 2013-08-05 01:30:09 -0700 | [diff] [blame] | 899 | def abort_special_tasks(**filter_data): |
| 900 | """\ |
| 901 | Abort the special task, or tasks, specified in the filter. |
| 902 | """ |
| 903 | query = models.SpecialTask.query_objects(filter_data) |
| 904 | special_tasks = query.filter(is_active=True) |
| 905 | for task in special_tasks: |
| 906 | task.abort() |
| 907 | |
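# Example usage (illustrative only; the hostname is hypothetical). Any
# query_objects() filter on SpecialTask works here; only tasks that are still
# active get aborted:
#
#     abort_special_tasks(host__hostname='somehost')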
| 908 | |
Simran Basi | 73dae55 | 2013-02-25 14:57:46 -0800 | [diff] [blame] | 909 | def _call_special_tasks_on_hosts(task, hosts): |
| 910 | """\ |
| 911 | Schedules a set of hosts for a special task. |
| 912 | |
| 913 | @returns A list of hostnames that a special task was created for. |
| 914 | """ |
| 915 | models.AclGroup.check_for_acl_violation_hosts(hosts) |
Prashanth Balasubramanian | 6edaaf9 | 2014-11-24 16:36:25 -0800 | [diff] [blame] | 916 | shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts) |
Prashanth Balasubramanian | 8c98ac1 | 2014-12-23 11:26:44 -0800 | [diff] [blame] | 917 | if shard_host_map and not utils.is_shard(): |
Prashanth Balasubramanian | 6edaaf9 | 2014-11-24 16:36:25 -0800 | [diff] [blame] | 918 | raise ValueError('The following hosts are on shards, please ' |
| 919 | 'follow the link to the shards and create jobs ' |
| 920 | 'there instead. %s.' % shard_host_map) |
Simran Basi | 73dae55 | 2013-02-25 14:57:46 -0800 | [diff] [blame] | 921 | for host in hosts: |
| 922 | models.SpecialTask.schedule_special_task(host, task) |
| 923 | return list(sorted(host.hostname for host in hosts)) |
| 924 | |
| 925 | |
showard | 1ff7b2e | 2009-05-15 23:17:18 +0000 | [diff] [blame] | 926 | def reverify_hosts(**filter_data): |
| 927 | """\ |
| 928 | Schedules a set of hosts for verify. |
mbligh | 4e545a5 | 2009-12-19 05:30:39 +0000 | [diff] [blame] | 929 | |
| 930 | @returns A list of hostnames that a verify task was created for. |
showard | 1ff7b2e | 2009-05-15 23:17:18 +0000 | [diff] [blame] | 931 | """ |
Prashanth Balasubramanian | 4098123 | 2014-12-16 19:01:58 -0800 | [diff] [blame] | 932 | hosts = models.Host.query_objects(filter_data) |
| 933 | shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts, rpc_hostnames=True) |
| 934 | |
| 935 | # Filter out hosts on a shard from those on the master, forward |
| 936 | # rpcs to the shard with an additional hostname__in filter, and |
| 937 | # create a local SpecialTask for each remaining host. |
Prashanth Balasubramanian | 8c98ac1 | 2014-12-23 11:26:44 -0800 | [diff] [blame] | 938 | if shard_host_map and not utils.is_shard(): |
Prashanth Balasubramanian | 4098123 | 2014-12-16 19:01:58 -0800 | [diff] [blame] | 939 | hosts = [h for h in hosts if h.shard is None] |
| 940 | for shard, hostnames in shard_host_map.iteritems(): |
| 941 | |
| 942 | # The main client of this module is the frontend website, which
| 943 | # invokes this RPC with an 'id' or an 'id__in' filter. Regardless,
| 944 | # the 'hostname' filter should narrow down the list of hosts on |
| 945 | # each shard even though we supply all the ids in filter_data. |
| 946 | # This method uses hostname instead of id because it fits better |
| 947 | # with the overall architecture of redirection functions in rpc_utils. |
| 948 | shard_filter = filter_data.copy() |
| 949 | shard_filter['hostname__in'] = hostnames |
| 950 | rpc_utils.run_rpc_on_multiple_hostnames( |
| 951 | 'reverify_hosts', [shard], **shard_filter) |
| 952 | |
| 953 | # There is a race condition here if someone assigns a shard to one of these |
| 954 | # hosts before we create the task. The host will stay on the master if: |
| 955 | # 1. The host is not Ready |
| 956 | # 2. The host is Ready but has a task |
| 957 | # But if the host is Ready and doesn't have a task yet, it will get sent |
| 958 | # to the shard as we're creating a task here. |
| 959 | |
| 960 | # Given that we only rarely verify Ready hosts it isn't worth putting this |
| 961 | # entire method in a transaction. The worst case scenario is that we have |
| 962 | # a verify running on a Ready host while the shard is using it. If the verify
| 963 | # fails, no subsequent tasks will be created against the host on the master,
| 964 | # and verifies are safe enough that this is OK. |
| 965 | return _call_special_tasks_on_hosts(models.SpecialTask.Task.VERIFY, hosts) |
Simran Basi | 73dae55 | 2013-02-25 14:57:46 -0800 | [diff] [blame] | 966 | |
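# Example usage (illustrative only; the hostnames are hypothetical). Any Host
# filter can be used; a hostname__in filter mirrors how the shard redirection
# above narrows the query:
#
#     reverify_hosts(hostname__in=['somehost1', 'somehost2'])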
| 967 | |
| 968 | def repair_hosts(**filter_data): |
| 969 | """\ |
| 970 | Schedules a set of hosts for repair. |
| 971 | |
| 972 | @returns A list of hostnames that a repair task was created for. |
| 973 | """ |
| 974 | return _call_special_tasks_on_hosts(models.SpecialTask.Task.REPAIR, |
| 975 | models.Host.query_objects(filter_data)) |
showard | 1ff7b2e | 2009-05-15 23:17:18 +0000 | [diff] [blame] | 976 | |
| 977 | |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 978 | def get_jobs(not_yet_run=False, running=False, finished=False, |
| 979 | suite=False, sub=False, standalone=False, **filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 980 | """\ |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 981 | Extra status filter args for get_jobs: |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 982 | -not_yet_run: Include only jobs that have not yet started running. |
| 983 | -running: Include only jobs that have started running but for which not
| 984 | all hosts have completed. |
| 985 | -finished: Include only jobs for which all hosts have completed (or |
| 986 | aborted). |
| 987 | At most one of these three fields should be specified. |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 988 | |
| 989 | Extra type filter args for get_jobs: |
| 990 | -suite: Include only jobs with child jobs. |
| 991 | -sub: Include only jobs with a parent job. |
| 992 | -standalone: Include only jobs with no child or parent jobs.
| 993 | At most one of these three fields should be specified. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 994 | """ |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 995 | extra_args = rpc_utils.extra_job_status_filters(not_yet_run, |
| 996 | running, |
| 997 | finished) |
| 998 | filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args, |
| 999 | suite, |
| 1000 | sub, |
| 1001 | standalone) |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 1002 | job_dicts = [] |
| 1003 | jobs = list(models.Job.query_objects(filter_data)) |
| 1004 | models.Job.objects.populate_relationships(jobs, models.Label, |
| 1005 | 'dependencies') |
showard | c1a98d1 | 2010-01-15 00:22:22 +0000 | [diff] [blame] | 1006 | models.Job.objects.populate_relationships(jobs, models.JobKeyval, 'keyvals') |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 1007 | for job in jobs: |
| 1008 | job_dict = job.get_object_dict() |
| 1009 | job_dict['dependencies'] = ','.join(label.name |
| 1010 | for label in job.dependencies) |
showard | c1a98d1 | 2010-01-15 00:22:22 +0000 | [diff] [blame] | 1011 | job_dict['keyvals'] = dict((keyval.key, keyval.value) |
| 1012 | for keyval in job.keyvals) |
Eric Li | d23bc19 | 2011-02-09 14:38:57 -0800 | [diff] [blame] | 1013 | if job.parameterized_job: |
| 1014 | job_dict['image'] = get_parameterized_autoupdate_image_url(job) |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 1015 | job_dicts.append(job_dict) |
| 1016 | return rpc_utils.prepare_for_serialization(job_dicts) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1017 | |
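# Example usage (illustrative only; the owner login is hypothetical). At most
# one status flag and one type flag may be set per call, and both combine with
# ordinary field filters:
#
#     get_jobs(finished=True, suite=True, owner='someuser')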
| 1018 | |
| 1019 | def get_num_jobs(not_yet_run=False, running=False, finished=False, |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 1020 | suite=False, sub=False, standalone=False, |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1021 | **filter_data): |
| 1022 | """\ |
| 1023 | See get_jobs() for documentation of extra filter parameters. |
| 1024 | """ |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 1025 | extra_args = rpc_utils.extra_job_status_filters(not_yet_run, |
| 1026 | running, |
| 1027 | finished) |
| 1028 | filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args, |
| 1029 | suite, |
| 1030 | sub, |
| 1031 | standalone) |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1032 | return models.Job.query_count(filter_data) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1033 | |
| 1034 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1035 | def get_jobs_summary(**filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1036 | """\ |
Jiaxi Luo | aac5457 | 2014-06-04 13:57:02 -0700 | [diff] [blame] | 1037 | Like get_jobs(), but adds 'status_counts' and 'result_counts' fields.
| 1038 | |
| 1039 | 'status_counts' field is a dictionary mapping status strings to the number
| 1040 | of hosts currently with that status, i.e. {'Queued' : 4, 'Running' : 2}. |
| 1041 | |
| 1042 | 'result_counts' field is piped to tko's rpc_interface and has the return |
| 1043 | format specified under get_group_counts. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1044 | """ |
| 1045 | jobs = get_jobs(**filter_data) |
| 1046 | ids = [job['id'] for job in jobs] |
| 1047 | all_status_counts = models.Job.objects.get_status_counts(ids) |
| 1048 | for job in jobs: |
| 1049 | job['status_counts'] = all_status_counts[job['id']] |
Jiaxi Luo | aac5457 | 2014-06-04 13:57:02 -0700 | [diff] [blame] | 1050 | job['result_counts'] = tko_rpc_interface.get_status_counts( |
| 1051 | ['afe_job_id', 'afe_job_id'], |
| 1052 | header_groups=[['afe_job_id'], ['afe_job_id']], |
| 1053 | **{'afe_job_id': job['id']}) |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1054 | return rpc_utils.prepare_for_serialization(jobs) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1055 | |
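# Example usage (illustrative only; the job id is hypothetical). Each returned
# dict carries the extra fields described above, e.g. a 'status_counts' value
# shaped like {'Queued': 4, 'Running': 2}:
#
#     summaries = get_jobs_summary(id__in=[42])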
| 1056 | |
showard | a965cef | 2009-05-15 23:17:41 +0000 | [diff] [blame] | 1057 | def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None): |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1058 | """\ |
| 1059 | Retrieves all the information needed to clone a job. |
| 1060 | """ |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1061 | job = models.Job.objects.get(id=id) |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1062 | job_info = rpc_utils.get_job_info(job, |
showard | a965cef | 2009-05-15 23:17:41 +0000 | [diff] [blame] | 1063 | preserve_metahosts, |
| 1064 | queue_entry_filter_data) |
showard | 945072f | 2008-09-03 20:34:59 +0000 | [diff] [blame] | 1065 | |
showard | d9992fe | 2008-07-31 02:15:03 +0000 | [diff] [blame] | 1066 | host_dicts = [] |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1067 | for host in job_info['hosts']: |
| 1068 | host_dict = get_hosts(id=host.id)[0] |
| 1069 | other_labels = host_dict['labels'] |
| 1070 | if host_dict['platform']: |
| 1071 | other_labels.remove(host_dict['platform']) |
| 1072 | host_dict['other_labels'] = ', '.join(other_labels) |
showard | d9992fe | 2008-07-31 02:15:03 +0000 | [diff] [blame] | 1073 | host_dicts.append(host_dict) |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1074 | |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1075 | for host in job_info['one_time_hosts']: |
| 1076 | host_dict = dict(hostname=host.hostname, |
| 1077 | id=host.id, |
| 1078 | platform='(one-time host)', |
| 1079 | locked_text='') |
| 1080 | host_dicts.append(host_dict) |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1081 | |
showard | 4d07756 | 2009-05-08 18:24:36 +0000 | [diff] [blame] | 1082 | # convert keys from Label objects to strings (names of labels) |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1083 | meta_host_counts = dict((meta_host.name, count) for meta_host, count |
showard | 4d07756 | 2009-05-08 18:24:36 +0000 | [diff] [blame] | 1084 | in job_info['meta_host_counts'].iteritems()) |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1085 | |
| 1086 | info = dict(job=job.get_object_dict(), |
| 1087 | meta_host_counts=meta_host_counts, |
| 1088 | hosts=host_dicts) |
| 1089 | info['job']['dependencies'] = job_info['dependencies'] |
| 1090 | if job_info['atomic_group']: |
| 1091 | info['atomic_group_name'] = (job_info['atomic_group']).name |
| 1092 | else: |
| 1093 | info['atomic_group_name'] = None |
jamesren | 2275ef1 | 2010-04-12 18:25:06 +0000 | [diff] [blame] | 1094 | info['hostless'] = job_info['hostless'] |
jamesren | 76fcf19 | 2010-04-21 20:39:50 +0000 | [diff] [blame] | 1095 | info['drone_set'] = job.drone_set and job.drone_set.name |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1096 | |
Eric Li | d23bc19 | 2011-02-09 14:38:57 -0800 | [diff] [blame] | 1097 | if job.parameterized_job: |
| 1098 | info['job']['image'] = get_parameterized_autoupdate_image_url(job) |
| 1099 | |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1100 | return rpc_utils.prepare_for_serialization(info) |
| 1101 | |
| 1102 | |
showard | 34dc5fa | 2008-04-24 20:58:40 +0000 | [diff] [blame] | 1103 | # host queue entries |
| 1104 | |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1105 | def get_host_queue_entries(start_time=None, end_time=None, **filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1106 | """\ |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1107 | @returns A sequence of nested dictionaries of host and job information. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1108 | """ |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1109 | filter_data = rpc_utils.inject_times_to_filter('started_on__gte', |
| 1110 | 'started_on__lte', |
| 1111 | start_time, |
| 1112 | end_time, |
| 1113 | **filter_data) |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1114 | return rpc_utils.prepare_rows_as_nested_dicts( |
| 1115 | models.HostQueueEntry.query_objects(filter_data), |
| 1116 | ('host', 'atomic_group', 'job')) |
showard | 34dc5fa | 2008-04-24 20:58:40 +0000 | [diff] [blame] | 1117 | |
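# Example usage (illustrative only; the hostname and time strings are
# hypothetical, and the exact time format is whatever
# rpc_utils.inject_times_to_filter() accepts). The start/end times are applied
# to the entries' started_on field:
#
#     entries = get_host_queue_entries(start_time='2015-05-01 00:00:00',
#                                      end_time='2015-05-02 00:00:00',
#                                      host__hostname='somehost')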
| 1118 | |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1119 | def get_num_host_queue_entries(start_time=None, end_time=None, **filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1120 | """\ |
| 1121 | Get the number of host queue entries matching the given filter data.
| 1122 | """ |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1123 | filter_data = rpc_utils.inject_times_to_filter('started_on__gte', |
| 1124 | 'started_on__lte', |
| 1125 | start_time, |
| 1126 | end_time, |
| 1127 | **filter_data) |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1128 | return models.HostQueueEntry.query_count(filter_data) |
showard | 34dc5fa | 2008-04-24 20:58:40 +0000 | [diff] [blame] | 1129 | |
| 1130 | |
showard | 1e935f1 | 2008-07-11 00:11:36 +0000 | [diff] [blame] | 1131 | def get_hqe_percentage_complete(**filter_data): |
| 1132 | """ |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1133 | Computes the fraction of host queue entries matching the given filter data |
showard | 1e935f1 | 2008-07-11 00:11:36 +0000 | [diff] [blame] | 1134 | that are complete. |
| 1135 | """ |
| 1136 | query = models.HostQueueEntry.query_objects(filter_data) |
| 1137 | complete_count = query.filter(complete=True).count() |
| 1138 | total_count = query.count() |
| 1139 | if total_count == 0: |
| 1140 | return 1 |
| 1141 | return float(complete_count) / total_count |
| 1142 | |
| 1143 | |
showard | 1a5a408 | 2009-07-28 20:01:37 +0000 | [diff] [blame] | 1144 | # special tasks |
| 1145 | |
| 1146 | def get_special_tasks(**filter_data): |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1147 | """Get special task entries from the local database. |
| 1148 | |
| 1149 | Query the special tasks table for tasks matching the given |
| 1150 | `filter_data`, and return a list of the results. No attempt is |
| 1151 | made to forward the call to shards; the buck will stop here. |
| 1152 | The caller is expected to know the target shard for such reasons |
| 1153 | as: |
| 1154 | * The caller is a service (such as gs_offloader) configured |
| 1155 | to operate on behalf of one specific shard, and no other. |
| 1156 | * The caller has a host as a parameter, and knows that this is |
| 1157 | the shard assigned to that host. |
| 1158 | |
| 1159 | @param filter_data Filter keywords to pass to the underlying |
| 1160 | database query. |
| 1161 | |
| 1162 | """ |
J. Richard Barnette | fdfcd66 | 2015-04-13 17:20:29 -0700 | [diff] [blame] | 1163 | return rpc_utils.prepare_rows_as_nested_dicts( |
| 1164 | models.SpecialTask.query_objects(filter_data), |
| 1165 | ('host', 'queue_entry')) |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1166 | |
| 1167 | |
| 1168 | def get_host_special_tasks(host_id, **filter_data): |
| 1169 | """Get special task entries for a given host. |
| 1170 | |
| 1171 | Query the special tasks table for tasks that ran on the host |
| 1172 | given by `host_id` and matching the given `filter_data`. |
| 1173 | Return a list of the results. If the host is assigned to a |
| 1174 | shard, forward this call to that shard. |
| 1175 | |
| 1176 | @param host_id Id in the database of the target host. |
| 1177 | @param filter_data Filter keywords to pass to the underlying |
| 1178 | database query. |
| 1179 | |
| 1180 | """ |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1181 | # Retrieve host data even if the host is in an invalid state. |
| 1182 | host = models.Host.smart_get(host_id, False) |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1183 | if not host.shard: |
J. Richard Barnette | fdfcd66 | 2015-04-13 17:20:29 -0700 | [diff] [blame] | 1184 | return get_special_tasks(host_id=host_id, **filter_data) |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1185 | else: |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1186 | # The return values from AFE methods are post-processed |
| 1187 | # objects that aren't JSON-serializable. So, we have to |
| 1188 | # call AFE.run() to get the raw, serializable output from |
| 1189 | # the shard. |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1190 | shard_afe = frontend.AFE(server=host.shard.rpc_hostname()) |
| 1191 | return shard_afe.run('get_special_tasks', |
| 1192 | host_id=host_id, **filter_data) |
showard | 1a5a408 | 2009-07-28 20:01:37 +0000 | [diff] [blame] | 1193 | |
| 1194 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1195 | def get_num_special_tasks(**kwargs): |
| 1196 | """Get the number of special task entries from the local database. |
| 1197 | |
| 1198 | Query the special tasks table for tasks matching the given 'kwargs', |
| 1199 | and return the number of the results. No attempt is made to forward |
| 1200 | the call to shards; the buck will stop here. |
| 1201 | |
| 1202 | @param kwargs Filter keywords to pass to the underlying database query. |
| 1203 | |
| 1204 | """ |
| 1205 | return models.SpecialTask.query_count(kwargs) |
| 1206 | |
| 1207 | |
| 1208 | def get_host_num_special_tasks(host, **kwargs): |
| 1209 | """Get special task entries for a given host. |
| 1210 | |
| 1211 | Query the special tasks table for tasks that ran on the host |
| 1212 | given by 'host' and matching the given 'kwargs'. |
| 1213 | Return a list of the results. If the host is assigned to a |
| 1214 | shard, forward this call to that shard. |
| 1215 | |
| 1216 | @param host Id or name of a host; more often a hostname.
| 1217 | @param kwargs Filter keywords to pass to the underlying database query. |
| 1218 | |
| 1219 | """ |
| 1220 | # Retrieve host data even if the host is in an invalid state. |
| 1221 | host_model = models.Host.smart_get(host, False) |
| 1222 | if not host_model.shard: |
| 1223 | return get_num_special_tasks(host=host, **kwargs) |
| 1224 | else: |
| 1225 | shard_afe = frontend.AFE(server=host_model.shard.rpc_hostname()) |
| 1226 | return shard_afe.run('get_num_special_tasks', host=host, **kwargs) |
| 1227 | |
| 1228 | |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1229 | def get_status_task(host_id, end_time): |
J. Richard Barnette | 4d7e6e6 | 2015-05-01 10:47:34 -0700 | [diff] [blame] | 1230 | """Get the "status task" for a host from the local shard. |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1231 | |
J. Richard Barnette | 4d7e6e6 | 2015-05-01 10:47:34 -0700 | [diff] [blame] | 1232 | Returns a single special task representing the given host's |
| 1233 | "status task". The status task is a completed special task that |
| 1234 | identifies whether the corresponding host was working or broken |
| 1235 | when it completed. A successful task indicates a working host; |
| 1236 | a failed task indicates broken. |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1237 | |
J. Richard Barnette | 4d7e6e6 | 2015-05-01 10:47:34 -0700 | [diff] [blame] | 1238 | This call will not be forwarded to a shard; the receiving server
| 1239 | must be the shard that owns the host. |
| 1240 | |
| 1241 | @param host_id Id in the database of the target host. |
| 1242 | @param end_time Time reference for the host's status. |
| 1243 | |
| 1244 | @return A single task; its status (successful or not) |
| 1245 | corresponds to the status of the host (working or |
| 1246 | broken) at the given time. If no task is found, return |
| 1247 | `None`. |
| 1248 | |
| 1249 | """ |
| 1250 | tasklist = rpc_utils.prepare_rows_as_nested_dicts( |
| 1251 | status_history.get_status_task(host_id, end_time), |
| 1252 | ('host', 'queue_entry')) |
| 1253 | return tasklist[0] if tasklist else None |
| 1254 | |
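# Illustrative sketch (not part of the original module): a caller on the owning
# shard can decide whether a host was working at a given time from the returned
# task. The host id, end time, and the assumption that the serialized dict
# exposes the SpecialTask model's 'success' field are all hypothetical here.
#
#     task = get_status_task(host_id=12, end_time=some_end_time)
#     host_was_working = bool(task and task.get('success'))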
| 1255 | |
| 1256 | def get_host_status_task(host_id, end_time): |
| 1257 | """Get the "status task" for a host from its owning shard. |
| 1258 | |
| 1259 | Finds the given host's owning shard, and forwards to it a call |
| 1260 | to `get_status_task()` (see above). |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1261 | |
| 1262 | @param host_id Id in the database of the target host. |
| 1263 | @param end_time Time reference for the host's status. |
| 1264 | |
| 1265 | @return A single task; its status (successful or not) |
| 1266 | corresponds to the status of the host (working or |
| 1267 | broken) at the given time. If no task is found, return |
| 1268 | `None`. |
| 1269 | |
| 1270 | """ |
| 1271 | host = models.Host.smart_get(host_id) |
| 1272 | if not host.shard: |
J. Richard Barnette | 4d7e6e6 | 2015-05-01 10:47:34 -0700 | [diff] [blame] | 1273 | return get_status_task(host_id, end_time) |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1274 | else: |
| 1275 | # The return values from AFE methods are post-processed |
| 1276 | # objects that aren't JSON-serializable. So, we have to |
| 1277 | # call AFE.run() to get the raw, serializable output from |
| 1278 | # the shard. |
| 1279 | shard_afe = frontend.AFE(server=host.shard.rpc_hostname()) |
| 1280 | return shard_afe.run('get_status_task', |
| 1281 | host_id=host_id, end_time=end_time) |
| 1282 | |
| 1283 | |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1284 | # support for host detail view |
| 1285 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1286 | def get_host_queue_entries_and_special_tasks(host, query_start=None, |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1287 | query_limit=None, start_time=None, |
| 1288 | end_time=None): |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1289 | """ |
| 1290 | @returns an interleaved list of HostQueueEntries and SpecialTasks, |
| 1291 | in approximate run order. Each dict contains keys for type, host,
| 1292 | job, status, started_on, execution_path, and ID. |
| 1293 | """ |
| 1294 | total_limit = None |
| 1295 | if query_limit is not None: |
| 1296 | total_limit = (query_start or 0) + query_limit
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1297 | filter_data_common = {'host': host, |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1298 | 'query_limit': total_limit, |
| 1299 | 'sort_by': ['-id']} |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1300 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1301 | filter_data_special_tasks = rpc_utils.inject_times_to_filter( |
| 1302 | 'time_started__gte', 'time_started__lte', start_time, end_time, |
| 1303 | **filter_data_common) |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1304 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1305 | queue_entries = get_host_queue_entries( |
| 1306 | start_time, end_time, **filter_data_common) |
| 1307 | special_tasks = get_host_special_tasks(host, **filter_data_special_tasks) |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1308 | |
| 1309 | interleaved_entries = rpc_utils.interleave_entries(queue_entries, |
| 1310 | special_tasks) |
| 1311 | if query_start is not None: |
| 1312 | interleaved_entries = interleaved_entries[query_start:] |
| 1313 | if query_limit is not None: |
| 1314 | interleaved_entries = interleaved_entries[:query_limit] |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1315 | return rpc_utils.prepare_host_queue_entries_and_special_tasks( |
| 1316 | interleaved_entries, queue_entries) |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1317 | |
| 1318 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1319 | def get_num_host_queue_entries_and_special_tasks(host, start_time=None, |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1320 | end_time=None): |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1321 | filter_data_common = {'host': host} |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1322 | |
| 1323 | filter_data_queue_entries, filter_data_special_tasks = ( |
| 1324 | rpc_utils.inject_times_to_hqe_special_tasks_filters( |
| 1325 | filter_data_common, start_time, end_time)) |
| 1326 | |
| 1327 | return (models.HostQueueEntry.query_count(filter_data_queue_entries) |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1328 | + get_host_num_special_tasks(**filter_data_special_tasks)) |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1329 | |
| 1330 | |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1331 | # recurring run |
| 1332 | |
| 1333 | def get_recurring(**filter_data): |
| 1334 | return rpc_utils.prepare_rows_as_nested_dicts( |
| 1335 | models.RecurringRun.query_objects(filter_data), |
| 1336 | ('job', 'owner')) |
| 1337 | |
| 1338 | |
| 1339 | def get_num_recurring(**filter_data): |
| 1340 | return models.RecurringRun.query_count(filter_data) |
| 1341 | |
| 1342 | |
| 1343 | def delete_recurring_runs(**filter_data): |
| 1344 | to_delete = models.RecurringRun.query_objects(filter_data) |
| 1345 | to_delete.delete() |
| 1346 | |
| 1347 | |
| 1348 | def create_recurring_run(job_id, start_date, loop_period, loop_count): |
showard | 64a9595 | 2010-01-13 21:27:16 +0000 | [diff] [blame] | 1349 | owner = models.User.current_user().login |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1350 | job = models.Job.objects.get(id=job_id) |
| 1351 | return job.create_recurring_job(start_date=start_date, |
| 1352 | loop_period=loop_period, |
| 1353 | loop_count=loop_count, |
| 1354 | owner=owner) |
| 1355 | |
| 1356 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1357 | # other |
| 1358 | |
showard | e0b6362 | 2008-08-04 20:58:47 +0000 | [diff] [blame] | 1359 | def echo(data=""): |
| 1360 | """\ |
| 1361 | Returns the passed-in string. Useful as a basic test that RPC calls
| 1362 | can successfully be made. |
| 1363 | """ |
| 1364 | return data |
| 1365 | |
| 1366 | |
showard | b7a52fd | 2009-04-27 20:10:56 +0000 | [diff] [blame] | 1367 | def get_motd(): |
| 1368 | """\ |
| 1369 | Returns the message of the day as a string. |
| 1370 | """ |
| 1371 | return rpc_utils.get_motd() |
| 1372 | |
| 1373 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1374 | def get_static_data(): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1375 | """\ |
| 1376 | Returns a dictionary containing a bunch of data that shouldn't change |
| 1377 | often and is otherwise inaccessible. This includes: |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1378 | |
| 1379 | priorities: List of job priority choices. |
| 1380 | default_priority: Default priority value for new jobs. |
| 1381 | users: Sorted list of all users. |
Jiaxi Luo | 3187459 | 2014-06-11 10:36:35 -0700 | [diff] [blame] | 1382 | labels: Sorted list of labels whose names do not start with 'cros-version'
| 1383 | or 'fw-version'.
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1384 | atomic_groups: Sorted list of all atomic groups. |
| 1385 | tests: Sorted list of all tests. |
| 1386 | profilers: Sorted list of all profilers. |
| 1387 | current_user: Logged-in username. |
| 1388 | host_statuses: Sorted list of possible Host statuses. |
| 1389 | job_statuses: Sorted list of possible HostQueueEntry statuses. |
Simran Basi | 7e60574 | 2013-11-12 13:43:36 -0800 | [diff] [blame] | 1390 | job_timeout_mins_default: The default job timeout length in minutes.
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1391 | parse_failed_repair_default: Default value for the parse_failed_repair job |
Jiaxi Luo | 3187459 | 2014-06-11 10:36:35 -0700 | [diff] [blame] | 1392 | option. |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1393 | reboot_before_options: A list of valid RebootBefore string enums. |
| 1394 | reboot_after_options: A list of valid RebootAfter string enums. |
| 1395 | motd: Server's message of the day. |
| 1396 | status_dictionary: A mapping from one word job status names to a more |
| 1397 | informative description. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1398 | """ |
showard | 21baa45 | 2008-10-21 00:08:39 +0000 | [diff] [blame] | 1399 | |
| 1400 | job_fields = models.Job.get_field_dict() |
jamesren | 76fcf19 | 2010-04-21 20:39:50 +0000 | [diff] [blame] | 1401 | default_drone_set_name = models.DroneSet.default_drone_set_name() |
| 1402 | drone_sets = ([default_drone_set_name] + |
| 1403 | sorted(drone_set.name for drone_set in |
| 1404 | models.DroneSet.objects.exclude( |
| 1405 | name=default_drone_set_name))) |
showard | 21baa45 | 2008-10-21 00:08:39 +0000 | [diff] [blame] | 1406 | |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1407 | result = {} |
Alex Miller | 7d658cf | 2013-09-04 16:00:35 -0700 | [diff] [blame] | 1408 | result['priorities'] = priorities.Priority.choices() |
| 1409 | default_priority = priorities.Priority.DEFAULT |
| 1410 | result['default_priority'] = 'Default' |
| 1411 | result['max_schedulable_priority'] = priorities.Priority.DEFAULT |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1412 | result['users'] = get_users(sort_by=['login']) |
Jiaxi Luo | 3187459 | 2014-06-11 10:36:35 -0700 | [diff] [blame] | 1413 | |
| 1414 | label_exclude_filters = [{'name__startswith': 'cros-version'}, |
| 1415 | {'name__startswith': 'fw-version'}] |
| 1416 | result['labels'] = get_labels( |
| 1417 | label_exclude_filters, |
| 1418 | sort_by=['-platform', 'name']) |
| 1419 | |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1420 | result['atomic_groups'] = get_atomic_groups(sort_by=['name']) |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1421 | result['tests'] = get_tests(sort_by=['name']) |
showard | 2b9a88b | 2008-06-13 20:55:03 +0000 | [diff] [blame] | 1422 | result['profilers'] = get_profilers(sort_by=['name']) |
showard | 0fc3830 | 2008-10-23 00:44:07 +0000 | [diff] [blame] | 1423 | result['current_user'] = rpc_utils.prepare_for_serialization( |
showard | 64a9595 | 2010-01-13 21:27:16 +0000 | [diff] [blame] | 1424 | models.User.current_user().get_object_dict()) |
showard | 2b9a88b | 2008-06-13 20:55:03 +0000 | [diff] [blame] | 1425 | result['host_statuses'] = sorted(models.Host.Status.names) |
mbligh | 5a198b9 | 2008-12-11 19:33:29 +0000 | [diff] [blame] | 1426 | result['job_statuses'] = sorted(models.HostQueueEntry.Status.names) |
Simran Basi | 7e60574 | 2013-11-12 13:43:36 -0800 | [diff] [blame] | 1427 | result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS |
Simran Basi | 3421702 | 2012-11-06 13:43:15 -0800 | [diff] [blame] | 1428 | result['job_max_runtime_mins_default'] = ( |
| 1429 | models.Job.DEFAULT_MAX_RUNTIME_MINS) |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1430 | result['parse_failed_repair_default'] = bool( |
| 1431 | models.Job.DEFAULT_PARSE_FAILED_REPAIR) |
jamesren | dd85524 | 2010-03-02 22:23:44 +0000 | [diff] [blame] | 1432 | result['reboot_before_options'] = model_attributes.RebootBefore.names |
| 1433 | result['reboot_after_options'] = model_attributes.RebootAfter.names |
showard | 8fbae65 | 2009-01-20 23:23:10 +0000 | [diff] [blame] | 1434 | result['motd'] = rpc_utils.get_motd() |
jamesren | 76fcf19 | 2010-04-21 20:39:50 +0000 | [diff] [blame] | 1435 | result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled() |
| 1436 | result['drone_sets'] = drone_sets |
jamesren | 4a41e01 | 2010-07-16 22:33:48 +0000 | [diff] [blame] | 1437 | result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled() |
showard | 8ac29b4 | 2008-07-17 17:01:55 +0000 | [diff] [blame] | 1438 | |
showard | d3dc199 | 2009-04-22 21:01:40 +0000 | [diff] [blame] | 1439 | result['status_dictionary'] = {"Aborted": "Aborted", |
showard | 8ac29b4 | 2008-07-17 17:01:55 +0000 | [diff] [blame] | 1440 | "Verifying": "Verifying Host", |
Alex Miller | dfff2fd | 2013-05-28 13:05:06 -0700 | [diff] [blame] | 1441 | "Provisioning": "Provisioning Host", |
showard | 8ac29b4 | 2008-07-17 17:01:55 +0000 | [diff] [blame] | 1442 | "Pending": "Waiting on other hosts", |
| 1443 | "Running": "Running autoserv", |
| 1444 | "Completed": "Autoserv completed", |
| 1445 | "Failed": "Failed to complete", |
showard | d823b36 | 2008-07-24 16:35:46 +0000 | [diff] [blame] | 1446 | "Queued": "Queued", |
showard | 5deb677 | 2008-11-04 21:54:33 +0000 | [diff] [blame] | 1447 | "Starting": "Next in host's queue", |
| 1448 | "Stopped": "Other host(s) failed verify", |
showard | d3dc199 | 2009-04-22 21:01:40 +0000 | [diff] [blame] | 1449 | "Parsing": "Awaiting parse of final results", |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1450 | "Gathering": "Gathering log files", |
showard | 8cc058f | 2009-09-08 16:26:33 +0000 | [diff] [blame] | 1451 | "Template": "Template job for recurring run", |
mbligh | 4608b00 | 2010-01-05 18:22:35 +0000 | [diff] [blame] | 1452 | "Waiting": "Waiting for scheduler action", |
Dan Shi | 07e09af | 2013-04-12 09:31:29 -0700 | [diff] [blame] | 1453 | "Archiving": "Archiving results", |
| 1454 | "Resetting": "Resetting hosts"} |
Jiaxi Luo | 421608e | 2014-07-07 14:38:00 -0700 | [diff] [blame] | 1455 | |
| 1456 | result['wmatrix_url'] = rpc_utils.get_wmatrix_url() |
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 1457 | result['is_moblab'] = bool(utils.is_moblab()) |
Jiaxi Luo | 421608e | 2014-07-07 14:38:00 -0700 | [diff] [blame] | 1458 | |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1459 | return result |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1460 | |
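# Example usage (illustrative only): a frontend client typically fetches this
# dictionary once and reads individual keys from it, for example:
#
#     static = get_static_data()
#     priority_choices = static['priorities']
#     current_login = static['current_user']['login']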
| 1461 | |
| 1462 | def get_server_time(): |
| 1463 | return datetime.datetime.now().strftime("%Y-%m-%d %H:%M") |