"""
Autotest AFE Cleanup used by the scheduler
"""


import datetime, time, logging, random
from autotest_lib.database import database_connection
from autotest_lib.frontend.afe import models
from autotest_lib.scheduler import email_manager, scheduler_config
from autotest_lib.client.common_lib import host_protections


class PeriodicCleanup(object):


    def __init__(self, db, clean_interval, run_at_initialize=False):
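        """Create a cleanup that runs every clean_interval minutes against the
        given database connection; if run_at_initialize is True, a cleanup is
        also performed when initialize() is called.
        """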
        self._db = db
        self.clean_interval = clean_interval
        self._last_clean_time = time.time()
        self._run_at_initialize = run_at_initialize


    def initialize(self):
        if self._run_at_initialize:
            self._cleanup()


    def run_cleanup_maybe(self):
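        """Run the cleanup if more than clean_interval minutes have elapsed
        since the last cleanup.
        """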
        should_cleanup = (self._last_clean_time + self.clean_interval * 60
                          < time.time())
        if should_cleanup:
            self._cleanup()
            self._last_clean_time = time.time()


    def _cleanup(self):
        """Abstract cleanup method."""
        raise NotImplementedError


class UserCleanup(PeriodicCleanup):
    """User cleanup that is controlled by the global config variable
    clean_interval in the SCHEDULER section.
    """


    def __init__(self, db, clean_interval_minutes):
        super(UserCleanup, self).__init__(db, clean_interval_minutes)
        self._last_reverify_time = time.time()


    def _cleanup(self):
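        """Run all of the periodic cleanup tasks."""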
        logging.info('Running periodic cleanup')
        self._abort_timed_out_jobs()
        self._abort_jobs_past_max_runtime()
        self._clear_inactive_blocks()
        self._check_for_db_inconsistencies()
        self._reverify_dead_hosts()


    def _abort_timed_out_jobs(self):
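        """Abort jobs that have exceeded their timeout and are not complete."""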
        msg = 'Aborting all jobs that have timed out and are not complete'
        logging.info(msg)
        query = models.Job.objects.filter(hostqueueentry__complete=False).extra(
            where=['created_on + INTERVAL timeout HOUR < NOW()'])
        for job in query.distinct():
            logging.warning('Aborting job %d due to job timeout', job.id)
            job.abort()


    def _abort_jobs_past_max_runtime(self):
        """
        Abort executions that have started and are past the job's max runtime.
        """
        logging.info('Aborting all jobs that have passed maximum runtime')
        rows = self._db.execute("""
            SELECT hqe.id
            FROM afe_host_queue_entries AS hqe
            INNER JOIN afe_jobs ON (hqe.job_id = afe_jobs.id)
            WHERE NOT hqe.complete AND NOT hqe.aborted AND
            hqe.started_on + INTERVAL afe_jobs.max_runtime_mins MINUTE <
            NOW()""")
        query = models.HostQueueEntry.objects.filter(
            id__in=[row[0] for row in rows])
        for queue_entry in query.distinct():
            logging.warning('Aborting entry %s due to max runtime', queue_entry)
            queue_entry.abort()


    def _check_for_db_inconsistencies(self):
        logging.info('Cleaning db inconsistencies')
        self._check_all_invalid_related_objects()


    def _check_invalid_related_objects_one_way(self, first_model,
                                               relation_field, second_model):
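        """Clear relationships from invalid first_model objects to second_model
        objects.  Returns a list of error strings describing each relationship
        that was cleared.
        """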
        if 'invalid' not in first_model.get_field_dict():
            return []
        invalid_objects = list(first_model.objects.filter(invalid=True))
        first_model.objects.populate_relationships(invalid_objects,
                                                   second_model,
                                                   'related_objects')
        error_lines = []
        for invalid_object in invalid_objects:
            if invalid_object.related_objects:
                related_list = ', '.join(str(related_object) for related_object
                                         in invalid_object.related_objects)
                error_lines.append('Invalid %s %s is related to %ss: %s'
                                   % (first_model.__name__, invalid_object,
                                      second_model.__name__, related_list))
                related_manager = getattr(invalid_object, relation_field)
                related_manager.clear()
        return error_lines


    def _check_invalid_related_objects(self, first_model, first_field,
                                       second_model, second_field):
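        """Check a relationship between two models in both directions."""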
        errors = self._check_invalid_related_objects_one_way(
            first_model, first_field, second_model)
        errors.extend(self._check_invalid_related_objects_one_way(
            second_model, second_field, first_model))
        return errors


    def _check_all_invalid_related_objects(self):
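        """Check all known model relationships for links to invalid objects,
        clear any that are found, and email a summary of what was cleaned.
        """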
        model_pairs = ((models.Host, 'labels', models.Label, 'host_set'),
                       (models.AclGroup, 'hosts', models.Host, 'aclgroup_set'),
                       (models.AclGroup, 'users', models.User, 'aclgroup_set'),
                       (models.Test, 'dependency_labels', models.Label,
                        'test_set'))
        errors = []
        for first_model, first_field, second_model, second_field in model_pairs:
            errors.extend(self._check_invalid_related_objects(
                first_model, first_field, second_model, second_field))

        if errors:
            subject = ('%s relationships to invalid models, cleaned all' %
                       len(errors))
            message = '\n'.join(errors)
            logging.warning(subject)
            logging.warning(message)
            email_manager.manager.enqueue_notify_email(subject, message)


    def _clear_inactive_blocks(self):
        msg = 'Clear out blocks for all completed jobs.'
        logging.info(msg)
        # this would be simpler using NOT IN (subquery), but MySQL
        # treats all IN subqueries as dependent, so this optimizes much
        # better
        self._db.execute("""
            DELETE ihq FROM afe_ineligible_host_queues ihq
            LEFT JOIN (SELECT DISTINCT job_id FROM afe_host_queue_entries
                       WHERE NOT complete) hqe
            USING (job_id) WHERE hqe.job_id IS NULL""")


    def _should_reverify_hosts_now(self):
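        """Return True if reverify_period_minutes (from the SCHEDULER config)
        has elapsed since the last reverify pass; a period of 0 disables
        reverification.
        """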
        reverify_period_sec = (scheduler_config.config.reverify_period_minutes
                               * 60)
        if reverify_period_sec == 0:
            return False
        return (self._last_reverify_time + reverify_period_sec) <= time.time()


    def _choose_subset_of_hosts_to_reverify(self, hosts):
        """Given hosts needing verification, return a subset to reverify."""
        max_at_once = scheduler_config.config.reverify_max_hosts_at_once
        if (max_at_once > 0 and len(hosts) > max_at_once):
            return random.sample(hosts, max_at_once)
        return sorted(hosts)


    def _reverify_dead_hosts(self):
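        """Schedule a Verify task for hosts stuck in Repair Failed, skipping
        hosts that are locked, invalid, or protected with Do not verify.
        """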
        if not self._should_reverify_hosts_now():
            return

        self._last_reverify_time = time.time()
        logging.info('Checking for dead hosts to reverify')
        hosts = models.Host.objects.filter(
                status=models.Host.Status.REPAIR_FAILED,
                locked=False,
                invalid=False)
        hosts = hosts.exclude(
                protection=host_protections.Protection.DO_NOT_VERIFY)
        if not hosts:
            return

        hosts = list(hosts)
        total_hosts = len(hosts)
        hosts = self._choose_subset_of_hosts_to_reverify(hosts)
        logging.info('Reverifying dead hosts (%d of %d) %s', len(hosts),
                     total_hosts, ', '.join(host.hostname for host in hosts))
        for host in hosts:
            models.SpecialTask.schedule_special_task(
                    host=host, task=models.SpecialTask.Task.VERIFY)


class TwentyFourHourUpkeep(PeriodicCleanup):
    """Cleanup that runs at the startup of monitor_db and every subsequent
    twenty four hours.
    """


    def __init__(self, db, run_at_initialize=True):
        clean_interval = 24 * 60 # 24 hours
        super(TwentyFourHourUpkeep, self).__init__(
            db, clean_interval, run_at_initialize=run_at_initialize)


    def _cleanup(self):
        logging.info('Running 24 hour clean up')
        self._django_session_cleanup()
        self._check_for_uncleanable_db_inconsistencies()


    def _django_session_cleanup(self):
        """Clean up django_session since django doesn't for us.
        http://www.djangoproject.com/documentation/0.96/sessions/
        """
        logging.info('Deleting old sessions from django_session')
        sql = 'TRUNCATE TABLE django_session'
        self._db.execute(sql)


    def _check_for_uncleanable_db_inconsistencies(self):
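        """Check for DB inconsistencies that cannot be cleaned up automatically
        and report them by email.
        """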
        logging.info('Checking for uncleanable DB inconsistencies')
        self._check_for_active_and_complete_queue_entries()
        self._check_for_multiple_platform_hosts()
        self._check_for_no_platform_hosts()
        self._check_for_multiple_atomic_group_hosts()


    def _check_for_active_and_complete_queue_entries(self):
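        """Report queue entries that are marked both active and complete.
        Aborted entries in this state are repaired by clearing the active flag.
        """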
        query = models.HostQueueEntry.objects.filter(active=True, complete=True)
        if query.count() != 0:
            subject = ('%d queue entries found with active=complete=1'
                       % query.count())
            lines = []
            for entry in query:
                lines.append(str(entry.get_object_dict()))
                if entry.status == 'Aborted':
                    logging.error('Aborted entry: %s is both active and '
                                  'complete. Setting active value to False.',
                                  str(entry))
                    entry.active = False
                    entry.save()
            self._send_inconsistency_message(subject, lines)


    def _check_for_multiple_platform_hosts(self):
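        """Report hosts that have more than one platform label."""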
        rows = self._db.execute("""
            SELECT afe_hosts.id, hostname, COUNT(1) AS platform_count,
                   GROUP_CONCAT(afe_labels.name)
            FROM afe_hosts
            INNER JOIN afe_hosts_labels ON
                    afe_hosts.id = afe_hosts_labels.host_id
            INNER JOIN afe_labels ON afe_hosts_labels.label_id = afe_labels.id
            WHERE afe_labels.platform
            GROUP BY afe_hosts.id
            HAVING platform_count > 1
            ORDER BY hostname""")
        if rows:
            subject = '%s hosts with multiple platforms' % self._db.rowcount
            lines = [' '.join(str(item) for item in row)
                     for row in rows]
            self._send_inconsistency_message(subject, lines)


    def _check_for_no_platform_hosts(self):
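        """Log valid hosts that have no platform label at all."""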
        rows = self._db.execute("""
            SELECT hostname
            FROM afe_hosts
            LEFT JOIN afe_hosts_labels
              ON afe_hosts.id = afe_hosts_labels.host_id
              AND afe_hosts_labels.label_id IN (SELECT id FROM afe_labels
                                                WHERE platform)
            WHERE NOT afe_hosts.invalid AND afe_hosts_labels.host_id IS NULL""")
        if rows:
            logging.warn('%s hosts with no platform\n%s', self._db.rowcount,
                         ', '.join(row[0] for row in rows))


    def _check_for_multiple_atomic_group_hosts(self):
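        """Report hosts whose labels place them in more than one atomic group."""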
285 rows = self._db.execute("""
showardeab66ce2009-12-23 00:03:56 +0000286 SELECT afe_hosts.id, hostname,
287 COUNT(DISTINCT afe_atomic_groups.name) AS atomic_group_count,
288 GROUP_CONCAT(afe_labels.name),
289 GROUP_CONCAT(afe_atomic_groups.name)
290 FROM afe_hosts
291 INNER JOIN afe_hosts_labels ON
292 afe_hosts.id = afe_hosts_labels.host_id
293 INNER JOIN afe_labels ON afe_hosts_labels.label_id = afe_labels.id
294 INNER JOIN afe_atomic_groups ON
295 afe_labels.atomic_group_id = afe_atomic_groups.id
296 WHERE NOT afe_hosts.invalid AND NOT afe_labels.invalid
297 GROUP BY afe_hosts.id
showard6157c632009-07-06 20:19:31 +0000298 HAVING atomic_group_count > 1
299 ORDER BY hostname""")
300 if rows:
301 subject = '%s hosts with multiple atomic groups' % self._db.rowcount
302 lines = [' '.join(str(item) for item in row)
303 for row in rows]
304 self._send_inconsistency_message(subject, lines)
305
306
showard01a51672009-05-29 18:42:37 +0000307 def _send_inconsistency_message(self, subject, lines):
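        """Log an inconsistency report and email it, truncating messages
        longer than 5000 characters.
        """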
        logging.error(subject)
        message = '\n'.join(lines)
        if len(message) > 5000:
            message = message[:5000] + '\n(truncated)\n'
        email_manager.manager.enqueue_notify_email(subject, message)