# pylint: disable-msg=C0111

# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

__author__ = 'cmasone@chromium.org (Chris Masone)'

import common
import datetime
import logging
import os
import shutil

from autotest_lib.frontend.afe import models
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import priorities
from autotest_lib.client.common_lib import time_utils
from autotest_lib.client.common_lib.cros import dev_server
from autotest_lib.client.common_lib.cros.graphite import stats
from autotest_lib.frontend.afe import rpc_utils
from autotest_lib.server import utils
from autotest_lib.server.cros.dynamic_suite import constants
from autotest_lib.server.cros.dynamic_suite import control_file_getter
from autotest_lib.server.cros.dynamic_suite import tools
from autotest_lib.server.hosts import moblab_host
from autotest_lib.site_utils import host_history
from autotest_lib.site_utils import job_history


_CONFIG = global_config.global_config
MOBLAB_BOTO_LOCATION = '/home/moblab/.boto'

# Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py.


def canonicalize_suite_name(suite_name):
    return 'test_suites/control.%s' % suite_name


def formatted_now():
    return datetime.datetime.now().strftime(time_utils.TIME_FMT)


def _get_control_file_contents_by_name(build, ds, suite_name):
    """Return control file contents for |suite_name|.

    Query the dev server at |ds| for the control file |suite_name|, included
    in |build|.

    @param build: unique name by which to refer to the image from now on.
    @param ds: a dev_server.DevServer instance to fetch control file with.
    @param suite_name: canonicalized suite name, e.g. test_suites/control.bvt.
    @raises ControlFileNotFound if a unique suite control file doesn't exist.
    @raises NoControlFileList if we can't list the control files at all.
    @raises ControlFileEmpty if the control file exists on the server, but
                             can't be read.

    @return the contents of the desired control file.
    """
    getter = control_file_getter.DevServerGetter.create(build, ds)
    # Get the control file for the suite.
    try:
        control_file_in = getter.get_control_file_contents_by_name(suite_name)
    except error.CrosDynamicSuiteException as e:
        raise type(e)("%s while testing %s." % (e, build))
    if not control_file_in:
        raise error.ControlFileEmpty(
            "Fetching %s returned no data." % suite_name)
    # Force control files to only contain ascii characters.
    try:
        control_file_in.encode('ascii')
    except UnicodeDecodeError as e:
        raise error.ControlFileMalformed(str(e))

    return control_file_in


def _stage_build_artifacts(build):
    """
    Ensure components of |build| necessary for installing images are staged.

    @param build: image we want to stage.

    @raises StageControlFileFailure: if the dev server throws 500 while staging
            suite control files.

    @return: dev_server.ImageServer instance to use with this build.
    @return: timings dictionary containing staging start/end times.
    """
    timings = {}
    # Ensure components of |build| necessary for installing images are staged
    # on the dev server. However set synchronous to False to allow other
    # components to be downloaded in the background.
    ds = dev_server.ImageServer.resolve(build)
    timings[constants.DOWNLOAD_STARTED_TIME] = formatted_now()
    try:
        ds.stage_artifacts(build, ['test_suites'])
    except dev_server.DevServerException as e:
        raise error.StageControlFileFailure(
            "Failed to stage %s: %s" % (build, e))
    timings[constants.PAYLOAD_FINISHED_TIME] = formatted_now()
    return (ds, timings)


def create_suite_job(name='', board='', build='', pool='', control_file='',
                     check_hosts=True, num=None, file_bugs=False, timeout=24,
                     timeout_mins=None, priority=priorities.Priority.DEFAULT,
                     suite_args=None, wait_for_results=True, job_retry=False,
                     max_runtime_mins=None, **kwargs):
    """
    Create a job to run a test suite on the given device with the given image.

    When the timeout specified in the control file is reached, the
    job is guaranteed to have completed and results will be available.

    @param name: The test name if control_file is supplied, otherwise the name
                 of the test suite to run, e.g. 'bvt'.
    @param board: the kind of device to run the tests on.
    @param build: unique name by which to refer to the image from now on.
    @param pool: Specify the pool of machines to use for scheduling
                 purposes.
    @param check_hosts: require appropriate live hosts to exist in the lab.
    @param num: Specify the number of machines to schedule across (integer).
                Leave unspecified or use None to use default sharding factor.
    @param file_bugs: File a bug on each test failure in this suite.
    @param timeout: The max lifetime of this suite, in hours.
    @param timeout_mins: The max lifetime of this suite, in minutes. Takes
                         priority over timeout.
    @param priority: Integer denoting priority. Higher is more important.
    @param suite_args: Optional arguments which will be parsed by the suite
                       control file. Used by control.test_that_wrapper to
                       determine which tests to run.
    @param wait_for_results: Set to False to run the suite job without waiting
                             for test jobs to finish. Default is True.
    @param job_retry: Set to True to enable job-level retry. Default is False.
    @param max_runtime_mins: Maximum amount of time a job can be running in
                             minutes.
    @param kwargs: extra keyword args. NOT USED.

    @raises ControlFileNotFound: if a unique suite control file doesn't exist.
    @raises NoControlFileList: if we can't list the control files at all.
    @raises StageControlFileFailure: If the dev server throws 500 while
                                     staging test_suites.
    @raises ControlFileEmpty: if the control file exists on the server, but
                              can't be read.

    @return: the job ID of the suite; -1 on error.
    """
    if type(num) is not int and num is not None:
        raise error.SuiteArgumentException('Ill specified num argument %r. '
                                           'Must be an integer or None.' % num)
    if num == 0:
        logging.warning("Can't run on 0 hosts; using default.")
        num = None
    (ds, timings) = _stage_build_artifacts(build)

    if not control_file:
        # No control file was supplied so look it up from the build artifacts.
        suite_name = canonicalize_suite_name(name)
        control_file = _get_control_file_contents_by_name(build, ds, suite_name)
        name = '%s-%s' % (build, suite_name)

    timeout_mins = timeout_mins or timeout * 60
    max_runtime_mins = max_runtime_mins or timeout * 60

    if not board:
        board = utils.ParseBuildName(build)[0]

    # Prepend build and board to the control file.
    inject_dict = {'board': board,
                   'build': build,
                   'check_hosts': check_hosts,
                   'pool': pool,
                   'num': num,
                   'file_bugs': file_bugs,
                   'timeout': timeout,
                   'timeout_mins': timeout_mins,
                   'devserver_url': ds.url(),
                   'priority': priority,
                   'suite_args': suite_args,
                   'wait_for_results': wait_for_results,
                   'job_retry': job_retry,
                   'max_runtime_mins': max_runtime_mins
                   }

    control_file = tools.inject_vars(inject_dict, control_file)

    return rpc_utils.create_job_common(name,
                                       priority=priority,
                                       timeout_mins=timeout_mins,
                                       max_runtime_mins=max_runtime_mins,
                                       control_type='Server',
                                       control_file=control_file,
                                       hostless=True,
                                       keyvals=timings)
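
# A minimal, illustrative invocation (all argument values are hypothetical);
# in practice this RPC is reached through the AFE RPC layer, e.g. by the
# run_suite tooling, rather than being called directly:
#
#   create_suite_job(name='bvt', board='lumpy',
#                    build='lumpy-release/R31-4500.0.0', pool='bvt',
#                    priority=priorities.Priority.DEFAULT)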


# TODO: hide the following rpcs under is_moblab
def moblab_only(func):
    """Ensure moblab specific functions only run on Moblab devices."""
    def verify(*args, **kwargs):
        if not utils.is_moblab():
            raise error.RPCException('RPC: %s can only run on Moblab Systems!'
                                     % func.__name__)
        return func(*args, **kwargs)
    return verify


@moblab_only
def get_config_values():
    """Returns all config values parsed from global and shadow configs.

    Config values are grouped by sections, and each section is composed of
    a list of name value pairs.
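
    For example (section and key names below are purely illustrative; the
    actual contents depend on the local global and shadow configs):
        {'SERVER': [('hostname', 'localhost'), ...],
         'CROS': [('dev_server', 'http://devserver:8080'), ...]}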
    """
    sections = _CONFIG.get_sections()
    config_values = {}
    for section in sections:
        config_values[section] = _CONFIG.config.items(section)
    return rpc_utils.prepare_for_serialization(config_values)


@moblab_only
def update_config_handler(config_values):
    """
    Update config values and override shadow config.

    @param config_values: See get_config_values().
    """
    for section, config_value_list in config_values.iteritems():
        for key, value in config_value_list:
            _CONFIG.override_config_value(section, key, value)
    if not _CONFIG.shadow_file or not os.path.exists(_CONFIG.shadow_file):
        raise error.RPCException('Shadow config file does not exist.')

    with open(_CONFIG.shadow_file, 'w') as config_file:
        _CONFIG.config.write(config_file)
    # TODO (sbasi) crbug.com/403916 - Remove the reboot command and
    # instead restart the services that rely on the config values.
    os.system('sudo reboot')


@moblab_only
def reset_config_settings():
    """Reset config overrides by emptying the shadow config, then reboot."""
    with open(_CONFIG.shadow_file, 'w') as config_file:
        pass
    os.system('sudo reboot')


@moblab_only
def set_boto_key(boto_key):
    """Update the boto_key file.

    @param boto_key: File name of boto_key uploaded through handle_file_upload.
    """
    if not os.path.exists(boto_key):
        raise error.RPCException('Boto key: %s does not exist!' % boto_key)
    shutil.copyfile(boto_key, moblab_host.MOBLAB_BOTO_LOCATION)


def get_job_history(**filter_data):
    """Get the history of a job, including the special tasks executed for it.

    @param filter_data: filter for the call, should at least include
                        {'job_id': [job id]}
    @returns: JSON string of the job's history, including information such
              as the hosts that ran the job and the special tasks executed
              before and after the job.
    """
    job_id = filter_data['job_id']
    job_info = job_history.get_job_info(job_id)
    return rpc_utils.prepare_for_serialization(job_info.get_history())
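
# Illustrative call (the job id is hypothetical): get_job_history(job_id=1234)
# returns the serialized history of job 1234, i.e. the hosts it ran on and
# the special tasks scheduled before and after it.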


def get_host_history(start_time, end_time, hosts=None, board=None, pool=None):
    """Get the history of a list of hosts.

    The return is a JSON string of host history for each host, for example,
    {'172.22.33.51': [{'status': 'Resetting',
                       'start_time': '2014-08-07 10:02:16',
                       'end_time': '2014-08-07 10:03:16',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'Task: Special Task 19441991 (host ...)'},
                      {'status': 'Running',
                       'start_time': '2014-08-07 10:03:18',
                       'end_time': '2014-08-07 10:13:00',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'HQE: 15305005, for job: 14995562'}
                     ]
    }
    @param start_time: start time to search for history, can be string value or
                       epoch time.
    @param end_time: end time to search for history, can be string value or
                     epoch time.
    @param hosts: A list of hosts to search for history. Default is None.
    @param board: board type of hosts. Default is None.
    @param pool: pool type of hosts. Default is None.
    @returns: JSON string of the host history.
    """
    return rpc_utils.prepare_for_serialization(
            host_history.get_history_details(
                start_time=start_time, end_time=end_time,
                hosts=hosts, board=board, pool=pool,
                process_pool_size=4))


def shard_heartbeat(shard_hostname, jobs=(), hqes=(),
                    known_job_ids=(), known_host_ids=()):
    """Receive updates for job statuses from shards and assign hosts and jobs.

    @param shard_hostname: Hostname of the calling shard
    @param jobs: Jobs in serialized form that should be updated with newer
                 status from a shard.
    @param hqes: Hostqueueentries in serialized form that should be updated with
                 newer status from a shard. Note that for every hostqueueentry
                 the corresponding job must be in jobs.
    @param known_job_ids: List of ids of jobs the shard already has.
    @param known_host_ids: List of ids of hosts the shard already has.

    @returns: Serialized representations of hosts, jobs and their dependencies
              to be inserted into a shard's database.
    """
    # The following alternatives to sending host and job ids in every heartbeat
    # have been considered:
    # 1. Sending the highest known job and host ids. This would work for jobs:
    #    Newer jobs always have larger ids. Also, if a job is not assigned to a
    #    particular shard during a heartbeat, it never will be assigned to this
    #    shard later.
    #    This is not true for hosts though: A host that is leased won't be sent
    #    to the shard now, but might be sent in a future heartbeat. This means
    #    sometimes hosts should be transferred that have a lower id than the
    #    maximum host id the shard knows.
    # 2. Send the number of jobs/hosts the shard knows to the master in each
    #    heartbeat. Compare these to the number of records that already have
    #    the shard_id set to this shard. In the normal case, they should match.
    #    In case they don't, resend all entities of that type.
    #    This would work well for hosts, because there aren't that many.
    #    Resending all jobs is quite a big overhead though.
    #    Also, this approach might run into edge cases when entities are
    #    ever deleted.
    # 3. Mixtures of the above: Use 1 for jobs and 2 for hosts.
    #    Using two different approaches isn't consistent and might cause
    #    confusion. Also the issues with the case of deletions might still
    #    occur.
    #
    # The overhead of sending all job and host ids in every heartbeat is low:
    # At peaks one board has about 1200 created but unfinished jobs.
    # See the numbers here: http://goo.gl/gQCGWH
    # Assuming that job id's have 6 digits and that json serialization takes a
    # comma and a space as overhead, the traffic per id sent is about 8 bytes.
    # If 5000 ids need to be sent, this means 40 kilobytes of traffic.
    # A NOT IN query with 5000 ids took about 30ms in tests made.
    # These numbers seem low enough to outweigh the disadvantages of the
    # solutions described above.
    timer = stats.Timer('shard_heartbeat')
    with timer:
        shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
        rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
        hosts, jobs = rpc_utils.find_records_for_shard(
                shard_obj,
                known_job_ids=known_job_ids, known_host_ids=known_host_ids)
        return {
            'hosts': [host.serialize() for host in hosts],
            'jobs': [job.serialize() for job in jobs],
        }
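
# Illustrative exchange (hostname and ids are hypothetical): a shard calls
#   shard_heartbeat('shard1.example.com', jobs=[...], hqes=[...],
#                   known_job_ids=[10, 11], known_host_ids=[3])
# and receives {'hosts': [...], 'jobs': [...]} holding only the records it
# does not already know about, ready to be inserted into its local database.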


def get_shards(**filter_data):
    """Return a list of all shards.

    @returns A sequence of nested dictionaries of shard information.
    """
    shards = models.Shard.query_objects(filter_data)
    serialized_shards = rpc_utils.prepare_rows_as_nested_dicts(shards, ())
    for serialized, shard in zip(serialized_shards, shards):
        serialized['labels'] = [label.name for label in shard.labels.all()]

    return serialized_shards


def add_shard(hostname, label):
    """Add a shard and start running jobs on it.

    @param hostname: The hostname of the shard to be added; needs to be unique.
    @param label: A platform label. Jobs of this label will be assigned to the
                  shard.

    @raises error.RPCException: If the label provided doesn't start with
            `board:`.
    @raises model_logic.ValidationError: If a shard with the given hostname
            already exists.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.
    """
    if not label.startswith('board:'):
        raise error.RPCException('Sharding only supported for `board:.*` '
                                 'labels.')

    # Fetch label first, so shard isn't created when label doesn't exist.
    label = models.Label.smart_get(label)
    shard = models.Shard.add_object(hostname=hostname)
    shard.labels.add(label)
    return shard.id
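
# Illustrative call (hostname and board are hypothetical):
#   add_shard('chromeos-shard1.example.com', 'board:daisy')
# registers the shard and returns its database id; jobs carrying the
# board:daisy label are then distributed to it via shard_heartbeat.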


def delete_shard(hostname):
    """Delete a shard and reclaim all resources from it.

    This claims back all assigned hosts from the shard. To ensure all DUTs are
    in a sane state, a Repair task is scheduled for them. This reboots the DUTs
    and therefore clears all running processes that might be left.

    The shard_id of jobs of that shard will be set to None.

    The status of jobs that haven't been reported to be finished yet will be
    lost. The master scheduler will pick up the jobs and execute them.

    @param hostname: Hostname of the shard to delete.
    """
    shard = rpc_utils.retrieve_shard(shard_hostname=hostname)

    # TODO(beeps): Power off shard

    # For ChromeOS hosts, repair reboots the DUT.
    # Repair will escalate through multiple repair steps and will verify
    # success after each of them. In any case, it will always run at least the
    # first one, which includes a reboot.
    # After a reboot we can be sure no processes from prior tests that were run
    # by a shard are still running on the DUT.
    # Important: Don't just set the status to Repair Failed, as that would run
    # Verify first, before doing any repair measures. Verify would probably
    # succeed, so this wouldn't change anything on the DUT.
    for host in models.Host.objects.filter(shard=shard):
        models.SpecialTask.objects.create(
                task=models.SpecialTask.Task.REPAIR,
                host=host,
                requested_by=models.User.current_user())
    models.Host.objects.filter(shard=shard).update(shard=None)

    models.Job.objects.filter(shard=shard).update(shard=None)

    shard.labels.clear()

    shard.delete()