#!/usr/bin/python

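# Unit tests for the Autotest scheduler (monitor_db).  The tests run against a
# throwaway test database provided by FrontendTestMixin and replace external
# pieces such as the drone manager with mocks, so no real hosts or autoserv
# processes are involved.
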
import unittest, time, subprocess, os, StringIO, tempfile, datetime, shutil
import logging
import common
import MySQLdb
from autotest_lib.frontend import setup_django_environment
from autotest_lib.frontend.afe import frontend_test_utils
from autotest_lib.client.common_lib import global_config, host_protections
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.database import database_connection, migrate
from autotest_lib.frontend import thread_local
from autotest_lib.frontend.afe import models
from autotest_lib.scheduler import monitor_db, drone_manager, email_manager
from autotest_lib.scheduler import scheduler_config

_DEBUG = False


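# A minimal stand-in for monitor_db.Agent: it only records whether tick() has
# been called and whether it has been marked done, which is all the dispatcher
# tests below need.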
class DummyAgent(object):
    _is_running = False
    _is_done = False
    num_processes = 1
    host_ids = []
    queue_entry_ids = []

    def is_running(self):
        return self._is_running


    def tick(self):
        self._is_running = True


    def is_done(self):
        return self._is_done


    def set_done(self, done):
        self._is_done = done
        self._is_running = not done


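# Argument comparators for use with mock expectations: IsRow matches a database
# row by its id column; IsAgentWithTask matches an Agent whose queue holds
# exactly the given task.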
class IsRow(mock.argument_comparator):
    def __init__(self, row_id):
        self.row_id = row_id


    def is_satisfied_by(self, parameter):
        return list(parameter)[0] == self.row_id


    def __str__(self):
        return 'row with id %s' % self.row_id


class IsAgentWithTask(mock.argument_comparator):
    def __init__(self, task):
        self._task = task


    def is_satisfied_by(self, parameter):
        if not isinstance(parameter, monitor_db.Agent):
            return False
        tasks = list(parameter.queue.queue)
        if len(tasks) != 1:
            return False
        return tasks[0] == self._task


def _set_host_and_qe_ids(agent_or_task, id_list=None):
    if id_list is None:
        id_list = []
    agent_or_task.host_ids = agent_or_task.queue_entry_ids = id_list


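# Common fixture for the scheduler tests: points monitor_db at the scheduler
# test database (via FrontendTestMixin), redirects drone manager paths to dummy
# locations and creates a fresh Dispatcher for every test.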
class BaseSchedulerTest(unittest.TestCase,
                        frontend_test_utils.FrontendTestMixin):
    _config_section = 'AUTOTEST_WEB'

    def _do_query(self, sql):
        self._database.execute(sql)


    def _set_monitor_stubs(self):
        # Clear the instance cache as this is a brand new database.
        monitor_db.DBObject._clear_instance_cache()

        self._database = (
            database_connection.DatabaseConnection.get_test_database(
                self._test_db_file))
        self._database.connect()
        self._database.debug = _DEBUG

        monitor_db._db = self._database
        monitor_db._drone_manager._results_dir = '/test/path'
        monitor_db._drone_manager._temporary_directory = '/test/path/tmp'


    def setUp(self):
        self._frontend_common_setup()
        self._set_monitor_stubs()
        self._dispatcher = monitor_db.Dispatcher()


    def tearDown(self):
        self._database.disconnect()
        self._frontend_common_teardown()


    def _update_hqe(self, set, where=''):
        query = 'UPDATE host_queue_entries SET ' + set
        if where:
            query += ' WHERE ' + where
        self._do_query(query)


class DBObjectTest(BaseSchedulerTest):
    # It may seem odd to subclass BaseSchedulerTest for this, but it saves us
    # duplicating some setup work for what we want to test.


    def test_compare_fields_in_row(self):
        host = monitor_db.Host(id=1)
        fields = list(host._fields)
        row_data = [getattr(host, fieldname) for fieldname in fields]
        self.assertEqual({}, host._compare_fields_in_row(row_data))
        row_data[fields.index('hostname')] = 'spam'
        self.assertEqual({'hostname': ('host1', 'spam')},
                         host._compare_fields_in_row(row_data))
        row_data[fields.index('id')] = 23
        self.assertEqual({'hostname': ('host1', 'spam'), 'id': (1, 23)},
                         host._compare_fields_in_row(row_data))


    def test_always_query(self):
        host_a = monitor_db.Host(id=2)
        self.assertEqual(host_a.hostname, 'host2')
        self._do_query('UPDATE hosts SET hostname="host2-updated" WHERE id=2')
        host_b = monitor_db.Host(id=2, always_query=True)
        self.assert_(host_a is host_b, 'Cached instance not returned.')
        self.assertEqual(host_a.hostname, 'host2-updated',
                         'Database was not re-queried')

        # If either of these is called, a query was made when it shouldn't be.
        host_a._compare_fields_in_row = lambda _: self.fail('eek! a query!')
        host_a._update_fields_from_row = host_a._compare_fields_in_row
        host_c = monitor_db.Host(id=2, always_query=False)
        self.assert_(host_a is host_c, 'Cached instance not returned')


    def test_delete(self):
        host = monitor_db.Host(id=3)
        host.delete()
        host = self.assertRaises(monitor_db.DBError, monitor_db.Host, id=3,
                                 always_query=False)
        host = self.assertRaises(monitor_db.DBError, monitor_db.Host, id=3,
                                 always_query=True)

    def test_save(self):
        # Dummy Job to avoid creating one in the HostQueueEntry __init__.
        class MockJob(object):
            def __init__(self, id):
                pass
            def tag(self):
                return 'MockJob'
        self.god.stub_with(monitor_db, 'Job', MockJob)
        hqe = monitor_db.HostQueueEntry(
                new_record=True,
                row=[0, 1, 2, 'Queued', None, 0, 0, 0, '.', None, False, None])
        hqe.save()
        new_id = hqe.id
        # Force a re-query and verify that the correct data was stored.
        monitor_db.DBObject._clear_instance_cache()
        hqe = monitor_db.HostQueueEntry(id=new_id)
        self.assertEqual(hqe.id, new_id)
        self.assertEqual(hqe.job_id, 1)
        self.assertEqual(hqe.host_id, 2)
        self.assertEqual(hqe.status, 'Queued')
        self.assertEqual(hqe.meta_host, None)
        self.assertEqual(hqe.active, False)
        self.assertEqual(hqe.complete, False)
        self.assertEqual(hqe.deleted, False)
        self.assertEqual(hqe.execution_subdir, '.')
        self.assertEqual(hqe.atomic_group_id, None)
        self.assertEqual(hqe.started_on, None)


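# DispatcherSchedulingTest replaces HostQueueEntry._do_run_pre_job_tasks with a
# stub that records (job_id, host_id) pairs instead of launching real agents;
# the tests then assert exactly which entries the dispatcher chose to schedule.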
class DispatcherSchedulingTest(BaseSchedulerTest):
    _jobs_scheduled = []


    def tearDown(self):
        super(DispatcherSchedulingTest, self).tearDown()


    def _set_monitor_stubs(self):
        super(DispatcherSchedulingTest, self)._set_monitor_stubs()

        def hqe__do_run_pre_job_tasks_stub(queue_entry):
            """Return a test dummy. Called by HostQueueEntry.run()."""
            self._record_job_scheduled(queue_entry.job.id, queue_entry.host.id)
            queue_entry.set_status('Starting')
            return DummyAgent()

        self.god.stub_with(monitor_db.HostQueueEntry, '_do_run_pre_job_tasks',
                           hqe__do_run_pre_job_tasks_stub)

        def hqe_queue_log_record_stub(self, log_line):
            """No-Op to avoid calls down to the _drone_manager during tests."""

        self.god.stub_with(monitor_db.HostQueueEntry, 'queue_log_record',
                           hqe_queue_log_record_stub)


    def _record_job_scheduled(self, job_id, host_id):
        record = (job_id, host_id)
        self.assert_(record not in self._jobs_scheduled,
                     'Job %d scheduled on host %d twice' %
                     (job_id, host_id))
        self._jobs_scheduled.append(record)


    def _assert_job_scheduled_on(self, job_id, host_id):
        record = (job_id, host_id)
        self.assert_(record in self._jobs_scheduled,
                     'Job %d not scheduled on host %d as expected\n'
                     'Jobs scheduled: %s' %
                     (job_id, host_id, self._jobs_scheduled))
        self._jobs_scheduled.remove(record)


    def _assert_job_scheduled_on_number_of(self, job_id, host_ids, number):
        """Assert job was scheduled on exactly number hosts out of a set."""
        found = []
        for host_id in host_ids:
            record = (job_id, host_id)
            if record in self._jobs_scheduled:
                found.append(record)
                self._jobs_scheduled.remove(record)
        if len(found) < number:
            self.fail('Job %d scheduled on fewer than %d hosts in %s.\n'
                      'Jobs scheduled: %s' % (job_id, number, host_ids, found))
        elif len(found) > number:
            self.fail('Job %d scheduled on more than %d hosts in %s.\n'
                      'Jobs scheduled: %s' % (job_id, number, host_ids, found))


    def _check_for_extra_schedulings(self):
        if len(self._jobs_scheduled) != 0:
            self.fail('Extra jobs scheduled: ' +
                      str(self._jobs_scheduled))


    def _convert_jobs_to_metahosts(self, *job_ids):
        sql_tuple = '(' + ','.join(str(i) for i in job_ids) + ')'
        self._do_query('UPDATE host_queue_entries SET '
                       'meta_host=host_id, host_id=NULL '
                       'WHERE job_id IN ' + sql_tuple)


    def _lock_host(self, host_id):
        self._do_query('UPDATE hosts SET locked=1 WHERE id=' +
                       str(host_id))


    def setUp(self):
        super(DispatcherSchedulingTest, self).setUp()
        self._jobs_scheduled = []


    def _test_basic_scheduling_helper(self, use_metahosts):
        'Basic nonmetahost scheduling'
        self._create_job_simple([1], use_metahosts)
        self._create_job_simple([2], use_metahosts)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(1, 1)
        self._assert_job_scheduled_on(2, 2)
        self._check_for_extra_schedulings()


    def _test_priorities_helper(self, use_metahosts):
        'Test prioritization ordering'
        self._create_job_simple([1], use_metahosts)
        self._create_job_simple([2], use_metahosts)
        self._create_job_simple([1,2], use_metahosts)
        self._create_job_simple([1], use_metahosts, priority=1)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(4, 1)  # higher priority
        self._assert_job_scheduled_on(2, 2)  # earlier job over later
        self._check_for_extra_schedulings()


    def _test_hosts_ready_helper(self, use_metahosts):
        """
        Only hosts that are status=Ready, unlocked and not invalid get
        scheduled.
        """
        self._create_job_simple([1], use_metahosts)
        self._do_query('UPDATE hosts SET status="Running" WHERE id=1')
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()

        self._do_query('UPDATE hosts SET status="Ready", locked=1 '
                       'WHERE id=1')
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()

        self._do_query('UPDATE hosts SET locked=0, invalid=1 '
                       'WHERE id=1')
        self._dispatcher._schedule_new_jobs()
        if not use_metahosts:
            self._assert_job_scheduled_on(1, 1)
        self._check_for_extra_schedulings()


    def _test_hosts_idle_helper(self, use_metahosts):
        'Only idle hosts get scheduled'
        self._create_job(hosts=[1], active=True)
        self._create_job_simple([1], use_metahosts)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def _test_obey_ACLs_helper(self, use_metahosts):
        self._do_query('DELETE FROM acl_groups_hosts WHERE host_id=1')
        self._create_job_simple([1], use_metahosts)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_basic_scheduling(self):
        self._test_basic_scheduling_helper(False)


    def test_priorities(self):
        self._test_priorities_helper(False)


    def test_hosts_ready(self):
        self._test_hosts_ready_helper(False)


    def test_hosts_idle(self):
        self._test_hosts_idle_helper(False)


    def test_obey_ACLs(self):
        self._test_obey_ACLs_helper(False)


    def test_one_time_hosts_ignore_ACLs(self):
        self._do_query('DELETE FROM acl_groups_hosts WHERE host_id=1')
        self._do_query('UPDATE hosts SET invalid=1 WHERE id=1')
        self._create_job_simple([1])
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(1, 1)
        self._check_for_extra_schedulings()


    def test_non_metahost_on_invalid_host(self):
        """
        Non-metahost entries can get scheduled on invalid hosts (this is how
        one-time hosts work).
        """
        self._do_query('UPDATE hosts SET invalid=1')
        self._test_basic_scheduling_helper(False)


    def test_metahost_scheduling(self):
        """
        Basic metahost scheduling
        """
        self._test_basic_scheduling_helper(True)


    def test_metahost_priorities(self):
        self._test_priorities_helper(True)


    def test_metahost_hosts_ready(self):
        self._test_hosts_ready_helper(True)


    def test_metahost_hosts_idle(self):
        self._test_hosts_idle_helper(True)


    def test_metahost_obey_ACLs(self):
        self._test_obey_ACLs_helper(True)


    def _setup_test_only_if_needed_labels(self):
        # apply only_if_needed label3 to host1
        models.Host.smart_get('host1').labels.add(self.label3)
        return self._create_job_simple([1], use_metahost=True)


    def test_only_if_needed_labels_avoids_host(self):
        job = self._setup_test_only_if_needed_labels()
        # if the job doesn't depend on label3, there should be no scheduling
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_only_if_needed_labels_schedules(self):
        job = self._setup_test_only_if_needed_labels()
        job.dependency_labels.add(self.label3)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(1, 1)
        self._check_for_extra_schedulings()


    def test_only_if_needed_labels_via_metahost(self):
        job = self._setup_test_only_if_needed_labels()
        job.dependency_labels.add(self.label3)
        # should also work if the metahost is the only_if_needed label
        self._do_query('DELETE FROM jobs_dependency_labels')
        self._create_job(metahosts=[3])
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(2, 1)
        self._check_for_extra_schedulings()


    def test_nonmetahost_over_metahost(self):
        """
        Non-metahost entries should take priority over metahost entries
        for the same host
        """
        self._create_job(metahosts=[1])
        self._create_job(hosts=[1])
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(2, 1)
        self._check_for_extra_schedulings()


    def test_metahosts_obey_blocks(self):
        """
        Metahosts can't get scheduled on hosts already scheduled for
        that job.
        """
        self._create_job(metahosts=[1], hosts=[1])
        # make the nonmetahost entry complete, so the metahost can try
        # to get scheduled
        self._update_hqe(set='complete = 1', where='host_id=1')
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    # TODO(gps): These should probably live in their own TestCase class
    # specific to testing HostScheduler methods directly.  It was convenient
    # to put it here for now to share existing test environment setup code.
    def test_HostScheduler_check_atomic_group_labels(self):
        normal_job = self._create_job(metahosts=[0])
        atomic_job = self._create_job(atomic_group=1)
        # Indirectly initialize the internal state of the host scheduler.
        self._dispatcher._refresh_pending_queue_entries()

        atomic_hqe = monitor_db.HostQueueEntry.fetch(where='job_id=%d' %
                                                     atomic_job.id).next()
        normal_hqe = monitor_db.HostQueueEntry.fetch(where='job_id=%d' %
                                                     normal_job.id).next()

        host_scheduler = self._dispatcher._host_scheduler
        self.assertTrue(host_scheduler._check_atomic_group_labels(
                [self.label4.id], atomic_hqe))
        self.assertFalse(host_scheduler._check_atomic_group_labels(
                [self.label4.id], normal_hqe))
        self.assertFalse(host_scheduler._check_atomic_group_labels(
                [self.label5.id, self.label6.id, self.label7.id], normal_hqe))
        self.assertTrue(host_scheduler._check_atomic_group_labels(
                [self.label4.id, self.label6.id], atomic_hqe))
        self.assertTrue(host_scheduler._check_atomic_group_labels(
                [self.label4.id, self.label5.id],
                atomic_hqe))


    def test_HostScheduler_get_host_atomic_group_id(self):
        job = self._create_job(metahosts=[self.label6.id])
        queue_entry = monitor_db.HostQueueEntry.fetch(
                where='job_id=%d' % job.id).next()
        # Indirectly initialize the internal state of the host scheduler.
        self._dispatcher._refresh_pending_queue_entries()

        # Test the host scheduler
        host_scheduler = self._dispatcher._host_scheduler

        # Two labels each in a different atomic group.  This should log an
        # error and continue.
        orig_logging_error = logging.error
        def mock_logging_error(message, *args):
            mock_logging_error._num_calls += 1
            # Test the logging call itself, we just wrapped it to count it.
            orig_logging_error(message, *args)
        mock_logging_error._num_calls = 0
        self.god.stub_with(logging, 'error', mock_logging_error)
        self.assertNotEquals(None, host_scheduler._get_host_atomic_group_id(
                [self.label4.id, self.label8.id], queue_entry))
        self.assertTrue(mock_logging_error._num_calls > 0)
        self.god.unstub(logging, 'error')

        # Two labels both in the same atomic group, this should not raise an
        # error, it will merely cause the job to schedule on the intersection.
        self.assertEquals(1, host_scheduler._get_host_atomic_group_id(
                [self.label4.id, self.label5.id]))

        self.assertEquals(None, host_scheduler._get_host_atomic_group_id([]))
        self.assertEquals(None, host_scheduler._get_host_atomic_group_id(
                [self.label3.id, self.label7.id, self.label6.id]))
        self.assertEquals(1, host_scheduler._get_host_atomic_group_id(
                [self.label4.id, self.label7.id, self.label6.id]))
        self.assertEquals(1, host_scheduler._get_host_atomic_group_id(
                [self.label7.id, self.label5.id]))


    def test_atomic_group_hosts_blocked_from_non_atomic_jobs(self):
        # Create a job scheduled to run on label6.
        self._create_job(metahosts=[self.label6.id])
        self._dispatcher._schedule_new_jobs()
        # label6 only has hosts that are in atomic groups associated with it,
        # so there should be no scheduling.
        self._check_for_extra_schedulings()


    def test_atomic_group_hosts_blocked_from_non_atomic_jobs_explicit(self):
        # Create a job scheduled to run on label5.  This is an atomic group
        # label but this job does not request atomic group scheduling.
        self._create_job(metahosts=[self.label5.id])
        self._dispatcher._schedule_new_jobs()
        # label5 only has hosts that are in atomic groups associated with it,
        # so there should be no scheduling.
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_basics(self):
        # Create jobs scheduled to run on an atomic group.
        job_a = self._create_job(synchronous=True, metahosts=[self.label4.id],
                                 atomic_group=1)
        job_b = self._create_job(synchronous=True, metahosts=[self.label5.id],
                                 atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        # atomic_group.max_number_of_machines was 2, so we should run on 2.
        self._assert_job_scheduled_on_number_of(job_a.id, (5, 6, 7), 2)
        self._assert_job_scheduled_on(job_b.id, 8)  # label5
        self._assert_job_scheduled_on(job_b.id, 9)  # label5
        self._check_for_extra_schedulings()

        # The three-host label4 atomic group still has one host available.
        # That means a job with a synch_count of 1 asking to be scheduled on
        # the atomic group can still use the final machine.
        #
        # This may seem like a somewhat odd use case.  It allows the use of an
        # atomic group as a set of machines to run smaller jobs within (a set
        # of hosts configured for use in network tests with each other,
        # perhaps?).
        onehost_job = self._create_job(atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on_number_of(onehost_job.id, (5, 6, 7), 1)
        self._check_for_extra_schedulings()

        # No more atomic groups have hosts available, so no more jobs should
        # be scheduled.
        self._create_job(atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_obeys_acls(self):
        # Request scheduling on a specific atomic label but be denied by ACLs.
        self._do_query('DELETE FROM acl_groups_hosts WHERE host_id in (8,9)')
        job = self._create_job(metahosts=[self.label5.id], atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_dependency_label_exclude(self):
        # A dependency label that matches no hosts in the atomic group.
        job_a = self._create_job(atomic_group=1)
        job_a.dependency_labels.add(self.label3)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_metahost_dependency_label_exclude(self):
        # A metahost and dependency label that excludes too many hosts.
        job_b = self._create_job(synchronous=True, metahosts=[self.label4.id],
                                 atomic_group=1)
        job_b.dependency_labels.add(self.label7)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_dependency_label_match(self):
        # A dependency label that exists on enough atomic group hosts in only
        # one of the two atomic group labels.
        job_c = self._create_job(synchronous=True, atomic_group=1)
        job_c.dependency_labels.add(self.label7)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on_number_of(job_c.id, (8, 9), 2)
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_no_metahost(self):
        # Force it to schedule on the other group for a reliable test.
        self._do_query('UPDATE hosts SET invalid=1 WHERE id=9')
        # An atomic job without a metahost.
        job = self._create_job(synchronous=True, atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on_number_of(job.id, (5, 6, 7), 2)
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_partial_group(self):
        # Make one host in labels[3] unavailable so that there are only two
        # hosts left in the group.
        self._do_query('UPDATE hosts SET status="Repair Failed" WHERE id=5')
        job = self._create_job(synchronous=True, metahosts=[self.label4.id],
                               atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        # Verify that it was scheduled on the 2 ready hosts in that group.
        self._assert_job_scheduled_on(job.id, 6)
        self._assert_job_scheduled_on(job.id, 7)
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_not_enough_available(self):
        # Mark some hosts in each atomic group label as not usable.
        # One host running, another invalid in the first group label.
        self._do_query('UPDATE hosts SET status="Running" WHERE id=5')
        self._do_query('UPDATE hosts SET invalid=1 WHERE id=6')
        # One host invalid in the second group label.
        self._do_query('UPDATE hosts SET invalid=1 WHERE id=9')
        # Nothing to schedule when no group label has enough (2) good hosts.
        self._create_job(atomic_group=1, synchronous=True)
        self._dispatcher._schedule_new_jobs()
        # There are not enough hosts in either atomic group, so no more
        # scheduling should occur.
        self._check_for_extra_schedulings()

        # Now create an atomic job that has a synch count of 1.  It should
        # schedule on exactly one of the hosts.
        onehost_job = self._create_job(atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on_number_of(onehost_job.id, (7, 8), 1)


    def test_atomic_group_scheduling_no_valid_hosts(self):
        self._do_query('UPDATE hosts SET invalid=1 WHERE id in (8,9)')
        self._create_job(synchronous=True, metahosts=[self.label5.id],
                         atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        # No hosts in the selected group and label are valid; no schedulings.
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_metahost_works(self):
        # Test that atomic group scheduling also obeys metahosts.
        self._create_job(metahosts=[0], atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        # There are no atomic group hosts that also have that metahost.
        self._check_for_extra_schedulings()

        job_b = self._create_job(metahosts=[self.label5.id], atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(job_b.id, 8)
        self._assert_job_scheduled_on(job_b.id, 9)
        self._check_for_extra_schedulings()


    def test_atomic_group_skips_ineligible_hosts(self):
        # Test that hosts marked ineligible for this job are not scheduled.
        # How would this ever happen anyway?
        job = self._create_job(metahosts=[self.label4.id], atomic_group=1)
        models.IneligibleHostQueue.objects.create(job=job, host_id=5)
        models.IneligibleHostQueue.objects.create(job=job, host_id=6)
        models.IneligibleHostQueue.objects.create(job=job, host_id=7)
        self._dispatcher._schedule_new_jobs()
        # No scheduling should occur as all desired hosts were ineligible.
        self._check_for_extra_schedulings()


    def test_atomic_group_scheduling_fail(self):
        # If synch_count is > the atomic group's max number of machines, the
        # job should be aborted immediately.
        model_job = self._create_job(synchronous=True, atomic_group=1)
        model_job.synch_count = 4
        model_job.save()
        job = monitor_db.Job(id=model_job.id)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()
        queue_entries = job.get_host_queue_entries()
        self.assertEqual(1, len(queue_entries))
        self.assertEqual(queue_entries[0].status,
                         models.HostQueueEntry.Status.ABORTED)


    def test_atomic_group_no_labels_no_scheduling(self):
        # Never schedule on atomic groups marked invalid.
        job = self._create_job(metahosts=[self.label5.id], synchronous=True,
                               atomic_group=1)
        # Deleting an atomic group via the frontend marks it invalid and
        # removes all label references to the group.  The job now references
        # an invalid atomic group with no labels associated with it.
        self.label5.atomic_group.invalid = True
        self.label5.atomic_group.save()
        self.label5.atomic_group = None
        self.label5.save()

        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_schedule_directly_on_atomic_group_host_fail(self):
        # Scheduling a job directly on hosts in an atomic group must
        # fail to avoid users inadvertently holding up the use of an
        # entire atomic group by using the machines individually.
        job = self._create_job(hosts=[5])
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_schedule_directly_on_atomic_group_host(self):
        # Scheduling a job directly on one host in an atomic group will
        # work when the atomic group is listed on the HQE in addition
        # to the host (assuming the sync count is 1).
        job = self._create_job(hosts=[5], atomic_group=1)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(job.id, 5)
        self._check_for_extra_schedulings()


    def test_schedule_directly_on_atomic_group_hosts_sync2(self):
        job = self._create_job(hosts=[5,8], atomic_group=1, synchronous=True)
        self._dispatcher._schedule_new_jobs()
        self._assert_job_scheduled_on(job.id, 5)
        self._assert_job_scheduled_on(job.id, 8)
        self._check_for_extra_schedulings()


    def test_schedule_directly_on_atomic_group_hosts_wrong_group(self):
        job = self._create_job(hosts=[5,8], atomic_group=2, synchronous=True)
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_only_schedule_queued_entries(self):
        self._create_job(metahosts=[1])
        self._update_hqe(set='active=1, host_id=2')
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


    def test_no_ready_hosts(self):
        self._create_job(hosts=[1])
        self._do_query('UPDATE hosts SET status="Repair Failed"')
        self._dispatcher._schedule_new_jobs()
        self._check_for_extra_schedulings()


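# With _MAX_RUNNING = 3 and _MAX_STARTED = 2 below, the dispatcher may start at
# most two new agents per tick and may never have more than three processes
# running at once; the tests exercise both limits with DummyAgent instances.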
class DispatcherThrottlingTest(BaseSchedulerTest):
    """
    Test that the dispatcher throttles:
    * total number of running processes
    * number of processes started per cycle
    """
    _MAX_RUNNING = 3
    _MAX_STARTED = 2

    def setUp(self):
        super(DispatcherThrottlingTest, self).setUp()
        scheduler_config.config.max_processes_per_drone = self._MAX_RUNNING
        scheduler_config.config.max_processes_started_per_cycle = (
            self._MAX_STARTED)

        def fake_max_runnable_processes(fake_self):
            running = sum(agent.num_processes
                          for agent in self._agents
                          if agent.is_running())
            return self._MAX_RUNNING - running
        self.god.stub_with(drone_manager.DroneManager, 'max_runnable_processes',
                           fake_max_runnable_processes)


    def _setup_some_agents(self, num_agents):
        self._agents = [DummyAgent() for i in xrange(num_agents)]
        self._dispatcher._agents = list(self._agents)


    def _run_a_few_cycles(self):
        for i in xrange(4):
            self._dispatcher._handle_agents()


    def _assert_agents_started(self, indexes, is_started=True):
        for i in indexes:
            self.assert_(self._agents[i].is_running() == is_started,
                         'Agent %d %sstarted' %
                         (i, is_started and 'not ' or ''))


    def _assert_agents_not_started(self, indexes):
        self._assert_agents_started(indexes, False)


    def test_throttle_total(self):
        self._setup_some_agents(4)
        self._run_a_few_cycles()
        self._assert_agents_started([0, 1, 2])
        self._assert_agents_not_started([3])


    def test_throttle_per_cycle(self):
        self._setup_some_agents(3)
        self._dispatcher._handle_agents()
        self._assert_agents_started([0, 1])
        self._assert_agents_not_started([2])


    def test_throttle_with_synchronous(self):
        self._setup_some_agents(2)
        self._agents[0].num_processes = 3
        self._run_a_few_cycles()
        self._assert_agents_started([0])
        self._assert_agents_not_started([1])


    def test_large_agent_starvation(self):
        """
        Ensure large agents don't get starved by lower-priority agents.
        """
        self._setup_some_agents(3)
        self._agents[1].num_processes = 3
        self._run_a_few_cycles()
        self._assert_agents_started([0])
        self._assert_agents_not_started([1, 2])

        self._agents[0].set_done(True)
        self._run_a_few_cycles()
        self._assert_agents_started([1])
        self._assert_agents_not_started([2])


    def test_zero_process_agent(self):
        self._setup_some_agents(5)
        self._agents[4].num_processes = 0
        self._run_a_few_cycles()
        self._assert_agents_started([0, 1, 2, 4])
        self._assert_agents_not_started([3])


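# Aborting queue entries that are actively verifying should produce, per host,
# a new agent whose queue holds a CleanupTask followed by a VerifyTask, in
# addition to aborting the agent that was already attached to the entries.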
class FindAbortTest(BaseSchedulerTest):
    """
    Test the dispatcher abort functionality.
    """
    def _check_host_agent(self, agent, host_id):
        self.assert_(isinstance(agent, monitor_db.Agent))
        tasks = list(agent.queue.queue)
        self.assertEquals(len(tasks), 2)
        cleanup, verify = tasks

        self.assert_(isinstance(cleanup, monitor_db.CleanupTask))
        self.assertEquals(cleanup.host.id, host_id)

        self.assert_(isinstance(verify, monitor_db.VerifyTask))
        self.assertEquals(verify.host.id, host_id)


    def _check_agents(self, agents):
        agents = list(agents)
        self.assertEquals(len(agents), 3)
        self.assertEquals(agents[0], self._agent)
        self._check_host_agent(agents[1], 1)
        self._check_host_agent(agents[2], 2)


    def _common_setup(self):
        self._create_job(hosts=[1, 2])
        self._update_hqe(set='aborted=1')
        self._agent = self.god.create_mock_class(monitor_db.Agent, 'old_agent')
        _set_host_and_qe_ids(self._agent, [1, 2])
        self._agent.abort.expect_call()
        self._agent.abort.expect_call()  # gets called once for each HQE
        self._dispatcher.add_agent(self._agent)


    def test_find_aborting(self):
        self._common_setup()
        self._dispatcher._find_aborting()
        self.god.check_playback()


    def test_find_aborting_verifying(self):
        self._common_setup()
        self._update_hqe(set='active=1, status="Verifying"')

        self._dispatcher._find_aborting()

        self._check_agents(self._dispatcher._agents)
        self.god.check_playback()


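# Jobs whose synchronous start has been pending for longer than
# synch_job_start_timeout_minutes should be aborted by the periodic cleanup,
# but only when an entry is active, ACL-accessible and old enough.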
class JobTimeoutTest(BaseSchedulerTest):
    def _test_synch_start_timeout_helper(self, expect_abort,
                                         set_created_on=True, set_active=True,
                                         set_acl=True):
        scheduler_config.config.synch_job_start_timeout_minutes = 60
        job = self._create_job(hosts=[1, 2])
        if set_active:
            hqe = job.hostqueueentry_set.filter(host__id=1)[0]
            hqe.status = 'Pending'
            hqe.active = 1
            hqe.save()

        everyone_acl = models.AclGroup.smart_get('Everyone')
        host1 = models.Host.smart_get(1)
        if set_acl:
            everyone_acl.hosts.add(host1)
        else:
            everyone_acl.hosts.remove(host1)

        job.created_on = datetime.datetime.now()
        if set_created_on:
            job.created_on -= datetime.timedelta(minutes=100)
        job.save()

        cleanup = self._dispatcher._periodic_cleanup
        cleanup._abort_jobs_past_synch_start_timeout()

        for hqe in job.hostqueueentry_set.all():
            self.assertEquals(hqe.aborted, expect_abort)


    def test_synch_start_timeout_helper(self):
        # no abort if any of the conditions isn't met
        self._test_synch_start_timeout_helper(False, set_created_on=False)
        self._test_synch_start_timeout_helper(False, set_active=False)
        self._test_synch_start_timeout_helper(False, set_acl=False)
        # abort if all conditions are met
        self._test_synch_start_timeout_helper(True)


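# PidfileRunMonitor derives its state (process, exit status, number of failed
# tests) from the autoserv pidfile contents reported by the drone manager;
# these tests feed it canned PidfileContents objects through a mocked manager.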
class PidfileRunMonitorTest(unittest.TestCase):
    execution_tag = 'test_tag'
    pid = 12345
    process = drone_manager.Process('myhost', pid)
    num_tests_failed = 1

    def setUp(self):
        self.god = mock.mock_god()
        self.mock_drone_manager = self.god.create_mock_class(
            drone_manager.DroneManager, 'drone_manager')
        self.god.stub_with(monitor_db, '_drone_manager',
                           self.mock_drone_manager)
        self.god.stub_function(email_manager.manager, 'enqueue_notify_email')

        self.pidfile_id = object()

        (self.mock_drone_manager.get_pidfile_id_from
             .expect_call(self.execution_tag,
                          pidfile_name=monitor_db._AUTOSERV_PID_FILE)
             .and_return(self.pidfile_id))
        self.mock_drone_manager.register_pidfile.expect_call(self.pidfile_id)

        self.monitor = monitor_db.PidfileRunMonitor()
        self.monitor.attach_to_existing_process(self.execution_tag)


    def tearDown(self):
        self.god.unstub_all()


    def setup_pidfile(self, pid=None, exit_code=None, tests_failed=None,
                      use_second_read=False):
        contents = drone_manager.PidfileContents()
        if pid is not None:
            contents.process = drone_manager.Process('myhost', pid)
        contents.exit_status = exit_code
        contents.num_tests_failed = tests_failed
        self.mock_drone_manager.get_pidfile_contents.expect_call(
            self.pidfile_id, use_second_read=use_second_read).and_return(
            contents)


    def set_not_yet_run(self):
        self.setup_pidfile()


    def set_empty_pidfile(self):
        self.setup_pidfile()


    def set_running(self, use_second_read=False):
        self.setup_pidfile(self.pid, use_second_read=use_second_read)


    def set_complete(self, error_code, use_second_read=False):
        self.setup_pidfile(self.pid, error_code, self.num_tests_failed,
                           use_second_read=use_second_read)


    def _check_monitor(self, expected_pid, expected_exit_status,
                       expected_num_tests_failed):
        if expected_pid is None:
            self.assertEquals(self.monitor._state.process, None)
        else:
            self.assertEquals(self.monitor._state.process.pid, expected_pid)
        self.assertEquals(self.monitor._state.exit_status, expected_exit_status)
        self.assertEquals(self.monitor._state.num_tests_failed,
                          expected_num_tests_failed)


        self.god.check_playback()


    def _test_read_pidfile_helper(self, expected_pid, expected_exit_status,
                                  expected_num_tests_failed):
        self.monitor._read_pidfile()
        self._check_monitor(expected_pid, expected_exit_status,
                            expected_num_tests_failed)


    def _get_expected_tests_failed(self, expected_exit_status):
        if expected_exit_status is None:
            expected_tests_failed = None
        else:
            expected_tests_failed = self.num_tests_failed
        return expected_tests_failed


    def test_read_pidfile(self):
        self.set_not_yet_run()
        self._test_read_pidfile_helper(None, None, None)

        self.set_empty_pidfile()
        self._test_read_pidfile_helper(None, None, None)

        self.set_running()
        self._test_read_pidfile_helper(self.pid, None, None)

        self.set_complete(123)
        self._test_read_pidfile_helper(self.pid, 123, self.num_tests_failed)


    def test_read_pidfile_error(self):
        self.mock_drone_manager.get_pidfile_contents.expect_call(
            self.pidfile_id, use_second_read=False).and_return(
            drone_manager.InvalidPidfile('error'))
        self.assertRaises(monitor_db.PidfileRunMonitor._PidfileException,
                          self.monitor._read_pidfile)
        self.god.check_playback()


    def setup_is_running(self, is_running):
        self.mock_drone_manager.is_process_running.expect_call(
            self.process).and_return(is_running)


    def _test_get_pidfile_info_helper(self, expected_pid, expected_exit_status,
                                      expected_num_tests_failed):
        self.monitor._get_pidfile_info()
        self._check_monitor(expected_pid, expected_exit_status,
                            expected_num_tests_failed)


    def test_get_pidfile_info(self):
        """
        normal cases for get_pidfile_info
        """
        # running
        self.set_running()
        self.setup_is_running(True)
        self._test_get_pidfile_info_helper(self.pid, None, None)

        # exited during check
        self.set_running()
        self.setup_is_running(False)
        self.set_complete(123, use_second_read=True)  # pidfile gets read again
        self._test_get_pidfile_info_helper(self.pid, 123, self.num_tests_failed)

        # completed
        self.set_complete(123)
        self._test_get_pidfile_info_helper(self.pid, 123, self.num_tests_failed)


    def test_get_pidfile_info_running_no_proc(self):
        """
        pidfile shows process running, but no proc exists
        """
        # running but no proc
        self.set_running()
        self.setup_is_running(False)
        self.set_running(use_second_read=True)
        email_manager.manager.enqueue_notify_email.expect_call(
            mock.is_string_comparator(), mock.is_string_comparator())
        self._test_get_pidfile_info_helper(self.pid, 1, 0)
        self.assertTrue(self.monitor.lost_process)


    def test_get_pidfile_info_not_yet_run(self):
        """
        pidfile hasn't been written yet
        """
        self.set_not_yet_run()
        self._test_get_pidfile_info_helper(None, None, None)


    def test_process_failed_to_write_pidfile(self):
        self.set_not_yet_run()
        email_manager.manager.enqueue_notify_email.expect_call(
            mock.is_string_comparator(), mock.is_string_comparator())
        self.monitor._start_time = time.time() - monitor_db.PIDFILE_TIMEOUT - 1
        self._test_get_pidfile_info_helper(None, 1, 0)
        self.assertTrue(self.monitor.lost_process)


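# An Agent polls its tasks in order until each reports is_done(); when a task
# finishes unsuccessfully, its failure_tasks are handed back to the dispatcher
# as a new agent, which is what test_agent below verifies.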
class AgentTest(unittest.TestCase):
    def setUp(self):
        self.god = mock.mock_god()
        self._dispatcher = self.god.create_mock_class(monitor_db.Dispatcher,
                                                      'dispatcher')


    def tearDown(self):
        self.god.unstub_all()


    def _create_mock_task(self, name):
        task = self.god.create_mock_class(monitor_db.AgentTask, name)
        _set_host_and_qe_ids(task)
        return task

    def _create_agent(self, tasks):
        agent = monitor_db.Agent(tasks)
        agent.dispatcher = self._dispatcher
        return agent


    def _finish_agent(self, agent):
        while not agent.is_done():
            agent.tick()


    def test_agent(self):
        task1 = self._create_mock_task('task1')
        task2 = self._create_mock_task('task2')
        task3 = self._create_mock_task('task3')
        task1.poll.expect_call()
        task1.is_done.expect_call().and_return(False)
        task1.poll.expect_call()
        task1.is_done.expect_call().and_return(True)
        task1.is_done.expect_call().and_return(True)
        task1.success = True

        task2.poll.expect_call()
        task2.is_done.expect_call().and_return(True)
        task2.is_done.expect_call().and_return(True)
        task2.success = False
        task2.failure_tasks = [task3]

        self._dispatcher.add_agent.expect_call(IsAgentWithTask(task3))

        agent = self._create_agent([task1, task2])
        self._finish_agent(agent)
        self.god.check_playback()


    def _test_agent_abort_helper(self, ignore_abort=False):
        task1 = self._create_mock_task('task1')
        task2 = self._create_mock_task('task2')
        task1.poll.expect_call()
        task1.is_done.expect_call().and_return(False)
        task1.abort.expect_call()
        if ignore_abort:
            task1.aborted = False  # task ignores abort; execution continues

            task1.poll.expect_call()
            task1.is_done.expect_call().and_return(True)
            task1.is_done.expect_call().and_return(True)
            task1.success = True

            task2.poll.expect_call()
            task2.is_done.expect_call().and_return(True)
            task2.is_done.expect_call().and_return(True)
            task2.success = True
        else:
            task1.aborted = True
            task2.abort.expect_call()
            task2.aborted = True

        agent = self._create_agent([task1, task2])
        agent.tick()
        agent.abort()
        self._finish_agent(agent)
        self.god.check_playback()


    def test_agent_abort(self):
        self._test_agent_abort_helper()
        self._test_agent_abort_helper(True)


    def _test_agent_abort_before_started_helper(self, ignore_abort=False):
        task = self._create_mock_task('task')
        task.abort.expect_call()
        if ignore_abort:
            task.aborted = False
            task.poll.expect_call()
            task.is_done.expect_call().and_return(True)
            task.is_done.expect_call().and_return(True)
            task.success = True
        else:
            task.aborted = True

        agent = self._create_agent([task])
        agent.abort()
        self._finish_agent(agent)
        self.god.check_playback()


    def test_agent_abort_before_started(self):
        self._test_agent_abort_before_started_helper()
        self._test_agent_abort_before_started_helper(True)


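# DelayedCallTask should fire its callback only once its delay has elapsed; the
# test drives it with a mocked time function so no real waiting is involved.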
class DelayedCallTaskTest(unittest.TestCase):
    def setUp(self):
        self.god = mock.mock_god()


    def tearDown(self):
        self.god.unstub_all()


    def test_delayed_call(self):
        test_time = self.god.create_mock_function('time')
        test_time.expect_call().and_return(33)
        test_time.expect_call().and_return(34.01)
        test_time.expect_call().and_return(34.99)
        test_time.expect_call().and_return(35.01)
        def test_callback():
            test_callback.calls += 1
        test_callback.calls = 0
        delay_task = monitor_db.DelayedCallTask(
            delay_seconds=2, callback=test_callback,
            now_func=test_time)  # time 33
        self.assertEqual(35, delay_task.end_time)
        agent = monitor_db.Agent([delay_task], num_processes=0)
        self.assert_(not agent.active_task)
        agent.tick()  # activates the task and polls it once, time 34.01
        self.assertEqual(0, test_callback.calls, "callback called early")
        agent.tick()  # time 34.99
        self.assertEqual(0, test_callback.calls, "callback called early")
        agent.tick()  # time 35.01
        self.assertEqual(1, test_callback.calls)
        self.assert_(agent.is_done())
        self.assert_(delay_task.is_done())
        self.assert_(delay_task.success)
        self.assert_(not delay_task.aborted)
        self.god.check_playback()


    def test_delayed_call_abort(self):
        delay_task = monitor_db.DelayedCallTask(
            delay_seconds=987654, callback=lambda : None)
        agent = monitor_db.Agent([delay_task], num_processes=0)
        agent.abort()
        agent.tick()
        self.assert_(agent.is_done())
        self.assert_(delay_task.aborted)
        self.assert_(delay_task.is_done())
        self.assert_(not delay_task.success)
        self.god.check_playback()



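# AgentTasksTest stubs out the DroneManager file operations and the
# PidfileRunMonitor methods, then builds mock Host/Job/HostQueueEntry objects
# (backed by matching Django model rows) for the agent task tests that follow.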
class AgentTasksTest(BaseSchedulerTest):
    ABSPATH_BASE = '/abspath/'
    HOSTNAME = 'myhost'
    BASE_TASK_DIR = 'hosts/%s/' % HOSTNAME
    RESULTS_DIR = '/results/dir'
    DUMMY_PROCESS = object()
    HOST_PROTECTION = host_protections.default
    PIDFILE_ID = object()
    JOB_OWNER = 'test_owner'
    JOB_NAME = 'test_job_name'
    JOB_AUTOSERV_PARAMS = set(['-u', JOB_OWNER, '-l', JOB_NAME])

    def setUp(self):
        super(AgentTasksTest, self).setUp()
        self.god = mock.mock_god()
        self.god.stub_with(drone_manager.DroneManager, 'get_temporary_path',
                           mock.mock_function('get_temporary_path',
                                              default_return_val='tempdir'))
        self.god.stub_function(drone_manager.DroneManager,
                               'copy_results_on_drone')
        self.god.stub_function(drone_manager.DroneManager,
                               'copy_to_results_repository')
        self.god.stub_function(drone_manager.DroneManager,
                               'get_pidfile_id_from')

        def dummy_absolute_path(drone_manager_self, path):
            return self.ABSPATH_BASE + path
        self.god.stub_with(drone_manager.DroneManager, 'absolute_path',
                           dummy_absolute_path)

        self.god.stub_class_method(monitor_db.PidfileRunMonitor, 'run')
        self.god.stub_class_method(monitor_db.PidfileRunMonitor, 'exit_code')
        self.god.stub_class_method(monitor_db.PidfileRunMonitor, 'kill')
        self.god.stub_class_method(monitor_db.PidfileRunMonitor, 'get_process')
        def mock_has_process(unused):
            return True
        self.god.stub_with(monitor_db.PidfileRunMonitor, 'has_process',
                           mock_has_process)

        self.host = self.god.create_mock_class(monitor_db.Host, 'host')
        self.host.id = 1
        self.host.hostname = self.HOSTNAME
        self.host.protection = self.HOST_PROTECTION
        host = models.Host.objects.create(id=self.host.id,
                                          hostname=self.host.hostname,
                                          protection=self.host.protection)

        self.job = self.god.create_mock_class(monitor_db.Job, 'job')
        self.job.owner = self.JOB_OWNER
        self.job.name = self.JOB_NAME
        self.job.id = 1337
        self.job.tag = lambda: 'fake-job-tag'
        job = models.Job.objects.create(id=self.job.id, owner=self.job.owner,
                                        name=self.job.name,
                                        created_on=datetime.datetime.now())

        self.queue_entry = self.god.create_mock_class(
            monitor_db.HostQueueEntry, 'queue_entry')
        self.queue_entry.id = 1
        self.queue_entry.job = self.job
        self.queue_entry.host = self.host
1336 self.queue_entry.meta_host = None
showard381341a2009-07-15 14:28:56 +00001337 models.HostQueueEntry.objects.create(id=self.queue_entry.id, job=job,
1338 host=host, meta_host=None)
1339
showardd3dc1992009-04-22 21:01:40 +00001340 self._dispatcher = self.god.create_mock_class(monitor_db.Dispatcher,
1341 'dispatcher')
1342
jadmanski3d161b02008-06-06 15:43:36 +00001343
jadmanski0afbb632008-06-06 21:10:57 +00001344 def tearDown(self):
showard184a5e82009-05-29 18:42:20 +00001345 super(AgentTasksTest, self).tearDown()
jadmanski0afbb632008-06-06 21:10:57 +00001346 self.god.unstub_all()
jadmanski3d161b02008-06-06 15:43:36 +00001347
1348
jadmanski0afbb632008-06-06 21:10:57 +00001349 def run_task(self, task, success):
1350 """
1351         Do essentially what an Agent would do, but protect against
1352 infinite looping from test errors.
1353 """
1354 if not getattr(task, 'agent', None):
1355 task.agent = object()
jadmanski0afbb632008-06-06 21:10:57 +00001356 count = 0
1357 while not task.is_done():
1358 count += 1
1359 if count > 10:
1360 print 'Task failed to finish'
1361 # in case the playback has clues to why it
1362                 # in case the playback has clues as to why it
1363                 # failed
1364 self.fail()
1365 task.poll()
1366 self.assertEquals(task.success, success)
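
    # Note: the loop above is a stand-in for a dispatcher repeatedly ticking
    # an Agent until it reports is_done(); the count guard just keeps a wrong
    # expectation from hanging the whole test run.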
jadmanski3d161b02008-06-06 15:43:36 +00001367
1368
showardb6681aa2009-07-08 21:15:00 +00001369 def setup_run_monitor(self, exit_status, task_tag, copy_log_file=True,
1370 aborted=False):
showard170873e2009-01-07 00:22:26 +00001371 monitor_db.PidfileRunMonitor.run.expect_call(
1372 mock.is_instance_comparator(list),
showarded2afea2009-07-07 20:54:07 +00001373 self.BASE_TASK_DIR + task_tag,
showard170873e2009-01-07 00:22:26 +00001374 nice_level=monitor_db.AUTOSERV_NICE_LEVEL,
showardd3dc1992009-04-22 21:01:40 +00001375 log_file=mock.anything_comparator(),
1376 pidfile_name=monitor_db._AUTOSERV_PID_FILE,
1377 paired_with_pidfile=None)
showard170873e2009-01-07 00:22:26 +00001378 monitor_db.PidfileRunMonitor.exit_code.expect_call()
showardb6681aa2009-07-08 21:15:00 +00001379 if aborted:
1380 monitor_db.PidfileRunMonitor.kill.expect_call()
1381 else:
1382 monitor_db.PidfileRunMonitor.exit_code.expect_call().and_return(
1383 exit_status)
jadmanski3d161b02008-06-06 15:43:36 +00001384
showard170873e2009-01-07 00:22:26 +00001385 if copy_log_file:
1386 self._setup_move_logfile()
1387
1388
showard678df4f2009-02-04 21:36:39 +00001389 def _setup_move_logfile(self, copy_on_drone=False,
1390 include_destination=False):
showard170873e2009-01-07 00:22:26 +00001391 monitor_db.PidfileRunMonitor.get_process.expect_call().and_return(
1392 self.DUMMY_PROCESS)
showard678df4f2009-02-04 21:36:39 +00001393 if copy_on_drone:
showarded2afea2009-07-07 20:54:07 +00001394 self.queue_entry.execution_path.expect_call().and_return('tag')
showard678df4f2009-02-04 21:36:39 +00001395 drone_manager.DroneManager.copy_results_on_drone.expect_call(
1396 self.DUMMY_PROCESS, source_path=mock.is_string_comparator(),
1397 destination_path=mock.is_string_comparator())
1398 elif include_destination:
showard170873e2009-01-07 00:22:26 +00001399 drone_manager.DroneManager.copy_to_results_repository.expect_call(
1400 self.DUMMY_PROCESS, mock.is_string_comparator(),
1401 destination_path=mock.is_string_comparator())
1402 else:
1403 drone_manager.DroneManager.copy_to_results_repository.expect_call(
1404 self.DUMMY_PROCESS, mock.is_string_comparator())
1405
jadmanski3d161b02008-06-06 15:43:36 +00001406
showardcfd4a7e2009-07-11 01:47:33 +00001407 def _test_repair_task_helper(self, success, task_tag, queue_entry=None):
jadmanski0afbb632008-06-06 21:10:57 +00001408 self.host.set_status.expect_call('Repairing')
1409 if success:
showarded2afea2009-07-07 20:54:07 +00001410 self.setup_run_monitor(0, task_tag)
jadmanski0afbb632008-06-06 21:10:57 +00001411 self.host.set_status.expect_call('Ready')
1412 else:
showarded2afea2009-07-07 20:54:07 +00001413 self.setup_run_monitor(1, task_tag)
jadmanski0afbb632008-06-06 21:10:57 +00001414 self.host.set_status.expect_call('Repair Failed')
jadmanski3d161b02008-06-06 15:43:36 +00001415
showardcfd4a7e2009-07-11 01:47:33 +00001416 task = monitor_db.RepairTask(self.host, queue_entry=queue_entry)
showard56193bb2008-08-13 20:07:41 +00001417 self.assertEquals(task.failure_tasks, [])
jadmanski0afbb632008-06-06 21:10:57 +00001418 self.run_task(task, success)
jadmanskifb7cfb12008-07-09 14:13:21 +00001419
1420 expected_protection = host_protections.Protection.get_string(
1421 host_protections.default)
mbligh3e0f7e02008-07-28 19:42:01 +00001422 expected_protection = host_protections.Protection.get_attr_name(
1423 expected_protection)
1424
showard170873e2009-01-07 00:22:26 +00001425 self.assertTrue(set(task.cmd) >=
1426 set([monitor_db._autoserv_path, '-p', '-R', '-m',
showarded2afea2009-07-07 20:54:07 +00001427 self.HOSTNAME, '-r',
1428 drone_manager.WORKING_DIRECTORY,
showard170873e2009-01-07 00:22:26 +00001429 '--host-protection', expected_protection]))
jadmanski0afbb632008-06-06 21:10:57 +00001430 self.god.check_playback()
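
    # Roughly, the repair invocation asserted above corresponds to a command
    # line of the form (a sketch; argument order and any extra flags are up to
    # the scheduler):
    #
    #   autoserv -p -R -m myhost -r <WORKING_DIRECTORY> \
    #       --host-protection <protection_name>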
jadmanski3d161b02008-06-06 15:43:36 +00001431
1432
jadmanski0afbb632008-06-06 21:10:57 +00001433 def test_repair_task(self):
showarded2afea2009-07-07 20:54:07 +00001434 self._test_repair_task_helper(True, '1-repair')
1435 self._test_repair_task_helper(False, '2-repair')
jadmanski3d161b02008-06-06 15:43:36 +00001436
1437
showardcfd4a7e2009-07-11 01:47:33 +00001438 def test_repair_task_with_hqe_already_requeued(self):
1439 # during recovery, a RepairTask can be passed a queue entry that has
1440 # already been requeued. ensure it leaves the HQE alone in that case.
1441 self.queue_entry.meta_host = 1
1442 self.queue_entry.host = None
1443 self._test_repair_task_helper(False, '1-repair',
1444 queue_entry=self.queue_entry)
1445
1446
showard8ac6f2a2009-07-16 14:50:32 +00001447 def test_recovery_repair_task_working_directory(self):
1448 # ensure that a RepairTask recovering an existing SpecialTask picks up
1449 # the working directory immediately
1450 class MockSpecialTask(object):
1451 def execution_path(self):
1452 return '/my/path'
1453
1454 special_task = MockSpecialTask()
1455 task = monitor_db.RepairTask(self.host, task=special_task)
1456
1457 self.assertEquals(task._working_directory, '/my/path')
1458
1459
showardb6681aa2009-07-08 21:15:00 +00001460 def test_repair_task_aborted(self):
1461 self.host.set_status.expect_call('Repairing')
1462 self.setup_run_monitor(0, '1-repair', aborted=True)
1463
1464 task = monitor_db.RepairTask(self.host)
1465 task.agent = object()
1466 task.poll()
1467 task.abort()
1468
1469 self.assertTrue(task.done)
1470 self.assertTrue(task.aborted)
1471 self.assertTrue(task.task.is_complete)
1472 self.assertFalse(task.task.is_active)
1473 self.god.check_playback()
1474
1475
showarded2afea2009-07-07 20:54:07 +00001476 def _test_repair_task_with_queue_entry_helper(self, parse_failed_repair,
1477 task_tag):
showardde634ee2009-01-30 01:44:24 +00001478 self.god.stub_class(monitor_db, 'FinalReparseTask')
1479 self.god.stub_class(monitor_db, 'Agent')
showardd9205182009-04-27 20:09:55 +00001480 self.god.stub_class_method(monitor_db.TaskWithJobKeyvals,
1481 '_write_keyval_after_job')
showardde634ee2009-01-30 01:44:24 +00001482 agent = DummyAgent()
showardd3dc1992009-04-22 21:01:40 +00001483 agent.dispatcher = self._dispatcher
showardde634ee2009-01-30 01:44:24 +00001484
jadmanski0afbb632008-06-06 21:10:57 +00001485 self.host.set_status.expect_call('Repairing')
showarde788ea62008-11-17 21:02:47 +00001486 self.queue_entry.requeue.expect_call()
showarded2afea2009-07-07 20:54:07 +00001487 self.setup_run_monitor(1, task_tag)
jadmanski0afbb632008-06-06 21:10:57 +00001488 self.host.set_status.expect_call('Repair Failed')
showardccbd6c52009-03-21 00:10:21 +00001489 self.queue_entry.update_from_database.expect_call()
showardde634ee2009-01-30 01:44:24 +00001490 self.queue_entry.set_execution_subdir.expect_call()
showardd9205182009-04-27 20:09:55 +00001491 monitor_db.TaskWithJobKeyvals._write_keyval_after_job.expect_call(
1492 'job_queued', mock.is_instance_comparator(int))
1493 monitor_db.TaskWithJobKeyvals._write_keyval_after_job.expect_call(
1494 'job_finished', mock.is_instance_comparator(int))
showard678df4f2009-02-04 21:36:39 +00001495 self._setup_move_logfile(copy_on_drone=True)
showarded2afea2009-07-07 20:54:07 +00001496 self.queue_entry.execution_path.expect_call().and_return('tag')
showard678df4f2009-02-04 21:36:39 +00001497 self._setup_move_logfile()
showarda1e74b32009-05-12 17:32:04 +00001498 self.job.parse_failed_repair = parse_failed_repair
1499 if parse_failed_repair:
1500 reparse_task = monitor_db.FinalReparseTask.expect_new(
1501 [self.queue_entry])
1502 reparse_agent = monitor_db.Agent.expect_new([reparse_task],
1503 num_processes=0)
1504 self._dispatcher.add_agent.expect_call(reparse_agent)
showarde788ea62008-11-17 21:02:47 +00001505 self.queue_entry.handle_host_failure.expect_call()
jadmanski3d161b02008-06-06 15:43:36 +00001506
showarde788ea62008-11-17 21:02:47 +00001507 task = monitor_db.RepairTask(self.host, self.queue_entry)
showardde634ee2009-01-30 01:44:24 +00001508 task.agent = agent
showardccbd6c52009-03-21 00:10:21 +00001509 self.queue_entry.status = 'Queued'
showardd9205182009-04-27 20:09:55 +00001510 self.job.created_on = datetime.datetime(2009, 1, 1)
jadmanski0afbb632008-06-06 21:10:57 +00001511 self.run_task(task, False)
showard87ba02a2009-04-20 19:37:32 +00001512 self.assertTrue(set(task.cmd) >= self.JOB_AUTOSERV_PARAMS)
jadmanski0afbb632008-06-06 21:10:57 +00001513 self.god.check_playback()
jadmanski3d161b02008-06-06 15:43:36 +00001514
1515
showarda1e74b32009-05-12 17:32:04 +00001516 def test_repair_task_with_queue_entry(self):
showarded2afea2009-07-07 20:54:07 +00001517 self._test_repair_task_with_queue_entry_helper(True, '1-repair')
1518 self._test_repair_task_with_queue_entry_helper(False, '2-repair')
showarda1e74b32009-05-12 17:32:04 +00001519
1520
showarded2afea2009-07-07 20:54:07 +00001521 def setup_verify_expects(self, success, use_queue_entry, task_tag):
jadmanski0afbb632008-06-06 21:10:57 +00001522 if use_queue_entry:
showard8fe93b52008-11-18 17:53:22 +00001523 self.queue_entry.set_status.expect_call('Verifying')
jadmanski0afbb632008-06-06 21:10:57 +00001524 self.host.set_status.expect_call('Verifying')
1525 if success:
showarded2afea2009-07-07 20:54:07 +00001526 self.setup_run_monitor(0, task_tag)
jadmanski0afbb632008-06-06 21:10:57 +00001527 self.host.set_status.expect_call('Ready')
1528 else:
showarded2afea2009-07-07 20:54:07 +00001529 self.setup_run_monitor(1, task_tag)
showard8fe93b52008-11-18 17:53:22 +00001530 if use_queue_entry and not self.queue_entry.meta_host:
1531 self.queue_entry.set_execution_subdir.expect_call()
showarded2afea2009-07-07 20:54:07 +00001532 self.queue_entry.execution_path.expect_call().and_return('tag')
showard170873e2009-01-07 00:22:26 +00001533 self._setup_move_logfile(include_destination=True)
jadmanski3d161b02008-06-06 15:43:36 +00001534
1535
showard56193bb2008-08-13 20:07:41 +00001536 def _check_verify_failure_tasks(self, verify_task):
1537 self.assertEquals(len(verify_task.failure_tasks), 1)
1538 repair_task = verify_task.failure_tasks[0]
1539 self.assert_(isinstance(repair_task, monitor_db.RepairTask))
1540 self.assertEquals(verify_task.host, repair_task.host)
showard8fe93b52008-11-18 17:53:22 +00001541 if verify_task.queue_entry:
showard2fe3f1d2009-07-06 20:19:11 +00001542 self.assertEquals(repair_task.queue_entry,
showardccbd6c52009-03-21 00:10:21 +00001543 verify_task.queue_entry)
showard56193bb2008-08-13 20:07:41 +00001544 else:
showard2fe3f1d2009-07-06 20:19:11 +00001545 self.assertEquals(repair_task.queue_entry, None)
showard56193bb2008-08-13 20:07:41 +00001546
1547
showarded2afea2009-07-07 20:54:07 +00001548 def _test_verify_task_helper(self, success, task_tag, use_queue_entry=False,
showard56193bb2008-08-13 20:07:41 +00001549 use_meta_host=False):
showarded2afea2009-07-07 20:54:07 +00001550 self.setup_verify_expects(success, use_queue_entry, task_tag)
jadmanski3d161b02008-06-06 15:43:36 +00001551
jadmanski0afbb632008-06-06 21:10:57 +00001552 if use_queue_entry:
showard170873e2009-01-07 00:22:26 +00001553 task = monitor_db.VerifyTask(queue_entry=self.queue_entry)
jadmanski0afbb632008-06-06 21:10:57 +00001554 else:
1555 task = monitor_db.VerifyTask(host=self.host)
showard56193bb2008-08-13 20:07:41 +00001556 self._check_verify_failure_tasks(task)
jadmanski0afbb632008-06-06 21:10:57 +00001557 self.run_task(task, success)
showard170873e2009-01-07 00:22:26 +00001558 self.assertTrue(set(task.cmd) >=
1559 set([monitor_db._autoserv_path, '-p', '-v', '-m',
showarded2afea2009-07-07 20:54:07 +00001560 self.HOSTNAME, '-r',
1561 drone_manager.WORKING_DIRECTORY]))
showard87ba02a2009-04-20 19:37:32 +00001562 if use_queue_entry:
1563 self.assertTrue(set(task.cmd) >= self.JOB_AUTOSERV_PARAMS)
jadmanski0afbb632008-06-06 21:10:57 +00001564 self.god.check_playback()
jadmanski3d161b02008-06-06 15:43:36 +00001565
1566
jadmanski0afbb632008-06-06 21:10:57 +00001567 def test_verify_task_with_host(self):
showarded2afea2009-07-07 20:54:07 +00001568 self._test_verify_task_helper(True, '1-verify')
1569 self._test_verify_task_helper(False, '2-verify')
jadmanski3d161b02008-06-06 15:43:36 +00001570
1571
jadmanski0afbb632008-06-06 21:10:57 +00001572 def test_verify_task_with_queue_entry(self):
showarded2afea2009-07-07 20:54:07 +00001573 self._test_verify_task_helper(True, '1-verify', use_queue_entry=True)
1574 self._test_verify_task_helper(False, '2-verify', use_queue_entry=True)
showard56193bb2008-08-13 20:07:41 +00001575
1576
1577 def test_verify_task_with_metahost(self):
showard8fe93b52008-11-18 17:53:22 +00001578 self.queue_entry.meta_host = 1
1579 self.test_verify_task_with_queue_entry()
jadmanski3d161b02008-06-06 15:43:36 +00001580
1581
showard5add1c82009-05-26 19:27:46 +00001582 def _setup_post_job_task_expects(self, autoserv_success, hqe_status=None,
showard6b733412009-04-27 20:09:18 +00001583 hqe_aborted=False):
showarded2afea2009-07-07 20:54:07 +00001584 self.queue_entry.execution_path.expect_call().and_return('tag')
showard170873e2009-01-07 00:22:26 +00001585 self.pidfile_monitor = monitor_db.PidfileRunMonitor.expect_new()
1586 self.pidfile_monitor.pidfile_id = self.PIDFILE_ID
1587 self.pidfile_monitor.attach_to_existing_process.expect_call('tag')
1588 if autoserv_success:
1589 code = 0
1590 else:
1591 code = 1
showardd3dc1992009-04-22 21:01:40 +00001592 self.queue_entry.update_from_database.expect_call()
showard6b733412009-04-27 20:09:18 +00001593 self.queue_entry.aborted = hqe_aborted
1594 if not hqe_aborted:
1595 self.pidfile_monitor.exit_code.expect_call().and_return(code)
showard170873e2009-01-07 00:22:26 +00001596
showard5add1c82009-05-26 19:27:46 +00001597 if hqe_status:
1598 self.queue_entry.set_status.expect_call(hqe_status)
showardd3dc1992009-04-22 21:01:40 +00001599
1600
1601 def _setup_pre_parse_expects(self, autoserv_success):
1602 self._setup_post_job_task_expects(autoserv_success, 'Parsing')
showard97aed502008-11-04 02:01:24 +00001603
1604
1605 def _setup_post_parse_expects(self, autoserv_success):
showard97aed502008-11-04 02:01:24 +00001606 if autoserv_success:
showard170873e2009-01-07 00:22:26 +00001607 status = 'Completed'
showard97aed502008-11-04 02:01:24 +00001608 else:
showard170873e2009-01-07 00:22:26 +00001609 status = 'Failed'
showard97aed502008-11-04 02:01:24 +00001610 self.queue_entry.set_status.expect_call(status)
1611
1612
showard5add1c82009-05-26 19:27:46 +00001613 def _expect_execute_run_monitor(self):
1614 self.monitor.exit_code.expect_call()
1615 self.monitor.exit_code.expect_call().and_return(0)
1616 self._expect_copy_results()
1617
1618
showardd3dc1992009-04-22 21:01:40 +00001619 def _setup_post_job_run_monitor(self, pidfile_name):
showard678df4f2009-02-04 21:36:39 +00001620 self.pidfile_monitor.has_process.expect_call().and_return(True)
showard170873e2009-01-07 00:22:26 +00001621 autoserv_pidfile_id = object()
showardd3dc1992009-04-22 21:01:40 +00001622 self.monitor = monitor_db.PidfileRunMonitor.expect_new()
1623 self.monitor.run.expect_call(
showard170873e2009-01-07 00:22:26 +00001624 mock.is_instance_comparator(list),
1625 'tag',
showardd3dc1992009-04-22 21:01:40 +00001626 nice_level=monitor_db.AUTOSERV_NICE_LEVEL,
showard170873e2009-01-07 00:22:26 +00001627 log_file=mock.anything_comparator(),
showardd3dc1992009-04-22 21:01:40 +00001628 pidfile_name=pidfile_name,
showard170873e2009-01-07 00:22:26 +00001629 paired_with_pidfile=self.PIDFILE_ID)
showard5add1c82009-05-26 19:27:46 +00001630 self._expect_execute_run_monitor()
showardd3dc1992009-04-22 21:01:40 +00001631
1632
showard6b733412009-04-27 20:09:18 +00001633 def _expect_copy_results(self, monitor=None, queue_entry=None):
1634 if monitor is None:
1635 monitor = self.monitor
1636 monitor.has_process.expect_call().and_return(True)
1637 if queue_entry:
showarded2afea2009-07-07 20:54:07 +00001638 queue_entry.execution_path.expect_call().and_return('tag')
showard6b733412009-04-27 20:09:18 +00001639 monitor.get_process.expect_call().and_return(self.DUMMY_PROCESS)
showard170873e2009-01-07 00:22:26 +00001640 drone_manager.DroneManager.copy_to_results_repository.expect_call(
1641 self.DUMMY_PROCESS, mock.is_string_comparator())
showard97aed502008-11-04 02:01:24 +00001642
showard170873e2009-01-07 00:22:26 +00001643
1644 def _test_final_reparse_task_helper(self, autoserv_success=True):
1645 self._setup_pre_parse_expects(autoserv_success)
showardd3dc1992009-04-22 21:01:40 +00001646 self._setup_post_job_run_monitor(monitor_db._PARSER_PID_FILE)
showard97aed502008-11-04 02:01:24 +00001647 self._setup_post_parse_expects(autoserv_success)
1648
1649 task = monitor_db.FinalReparseTask([self.queue_entry])
1650 self.run_task(task, True)
1651
1652 self.god.check_playback()
showard170873e2009-01-07 00:22:26 +00001653 cmd = [monitor_db._parser_path, '--write-pidfile', '-l', '2', '-r',
mbligh2d7c8bd2009-05-13 20:42:50 +00001654 '-o', '-P', '/abspath/tag']
showard97aed502008-11-04 02:01:24 +00001655 self.assertEquals(task.cmd, cmd)
1656
1657
1658 def test_final_reparse_task(self):
1659 self.god.stub_class(monitor_db, 'PidfileRunMonitor')
1660 self._test_final_reparse_task_helper()
showard97aed502008-11-04 02:01:24 +00001661 self._test_final_reparse_task_helper(autoserv_success=False)
1662
1663
1664 def test_final_reparse_throttling(self):
1665 self.god.stub_class(monitor_db, 'PidfileRunMonitor')
1666 self.god.stub_function(monitor_db.FinalReparseTask,
1667 '_can_run_new_parse')
1668
showard170873e2009-01-07 00:22:26 +00001669 self._setup_pre_parse_expects(True)
showard97aed502008-11-04 02:01:24 +00001670 monitor_db.FinalReparseTask._can_run_new_parse.expect_call().and_return(
1671 False)
1672 monitor_db.FinalReparseTask._can_run_new_parse.expect_call().and_return(
1673 True)
showardd3dc1992009-04-22 21:01:40 +00001674 self._setup_post_job_run_monitor(monitor_db._PARSER_PID_FILE)
showard97aed502008-11-04 02:01:24 +00001675 self._setup_post_parse_expects(True)
1676
1677 task = monitor_db.FinalReparseTask([self.queue_entry])
1678 self.run_task(task, True)
1679 self.god.check_playback()
showard1be97432008-10-17 15:30:45 +00001680
1681
showard5add1c82009-05-26 19:27:46 +00001682 def test_final_reparse_recovery(self):
1683 self.god.stub_class(monitor_db, 'PidfileRunMonitor')
1684 self.monitor = self.god.create_mock_class(monitor_db.PidfileRunMonitor,
1685 'run_monitor')
1686 self._setup_post_job_task_expects(True)
1687 self._expect_execute_run_monitor()
1688 self._setup_post_parse_expects(True)
1689
1690 task = monitor_db.FinalReparseTask([self.queue_entry],
showarded2afea2009-07-07 20:54:07 +00001691 recover_run_monitor=self.monitor)
showard5add1c82009-05-26 19:27:46 +00001692 self.run_task(task, True)
1693 self.god.check_playback()
1694
1695
showard597bfd32009-05-08 18:22:50 +00001696 def _setup_gather_logs_expects(self, autoserv_killed=True,
showard0bbfc212009-04-29 21:06:13 +00001697 hqe_aborted=False):
showardd3dc1992009-04-22 21:01:40 +00001698 self.god.stub_class(monitor_db, 'PidfileRunMonitor')
1699 self.god.stub_class(monitor_db, 'FinalReparseTask')
showard597bfd32009-05-08 18:22:50 +00001700 self._setup_post_job_task_expects(not autoserv_killed, 'Gathering',
showard0bbfc212009-04-29 21:06:13 +00001701 hqe_aborted)
showard597bfd32009-05-08 18:22:50 +00001702 if hqe_aborted:
1703 exit_code = None
1704 elif autoserv_killed:
1705 exit_code = 271
1706 else:
1707 exit_code = 0
1708 self.pidfile_monitor.exit_code.expect_call().and_return(exit_code)
1709 if exit_code != 0:
showard0bbfc212009-04-29 21:06:13 +00001710 self._setup_post_job_run_monitor('.collect_crashinfo_execute')
showardebc0fb72009-05-13 21:28:07 +00001711 self.pidfile_monitor.has_process.expect_call().and_return(True)
showard6b733412009-04-27 20:09:18 +00001712 self._expect_copy_results(monitor=self.pidfile_monitor,
1713 queue_entry=self.queue_entry)
showardd3dc1992009-04-22 21:01:40 +00001714 parse_task = monitor_db.FinalReparseTask.expect_new([self.queue_entry])
showard6b733412009-04-27 20:09:18 +00001715 _set_host_and_qe_ids(parse_task)
showardd3dc1992009-04-22 21:01:40 +00001716 self._dispatcher.add_agent.expect_call(IsAgentWithTask(parse_task))
1717
showardb5626452009-06-30 01:57:28 +00001718 self.pidfile_monitor.num_tests_failed.expect_call().and_return(0)
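
    # Assumed reading of the branching above: an exit code of 0 means autoserv
    # finished cleanly and no collect_crashinfo stage is expected; 271 is the
    # code this test uses for a killed autoserv and None is what an HQE abort
    # yields before autoserv reports anything, and either of those triggers
    # the '.collect_crashinfo_execute' run monitor.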
1719
showardd3dc1992009-04-22 21:01:40 +00001720
showard6b733412009-04-27 20:09:18 +00001721 def _run_gather_logs_task(self):
showardd3dc1992009-04-22 21:01:40 +00001722 task = monitor_db.GatherLogsTask(self.job, [self.queue_entry])
1723 task.agent = DummyAgent()
1724 task.agent.dispatcher = self._dispatcher
1725 self.run_task(task, True)
showardd3dc1992009-04-22 21:01:40 +00001726 self.god.check_playback()
1727
1728
showard6b733412009-04-27 20:09:18 +00001729 def test_gather_logs_task(self):
1730 self._setup_gather_logs_expects()
1731 # no rebooting for this basic test
1732 self.job.reboot_after = models.RebootAfter.NEVER
1733 self.host.set_status.expect_call('Ready')
1734
1735 self._run_gather_logs_task()
1736
1737
showard0bbfc212009-04-29 21:06:13 +00001738 def test_gather_logs_task_successful_autoserv(self):
showard597bfd32009-05-08 18:22:50 +00001739 # When Autoserv exits successfully, no collect_crashinfo stage runs
1740 self._setup_gather_logs_expects(autoserv_killed=False)
showard0bbfc212009-04-29 21:06:13 +00001741 self.job.reboot_after = models.RebootAfter.NEVER
1742 self.host.set_status.expect_call('Ready')
1743
1744 self._run_gather_logs_task()
1745
1746
showard6b733412009-04-27 20:09:18 +00001747 def _setup_gather_task_cleanup_expects(self):
1748 self.god.stub_class(monitor_db, 'CleanupTask')
1749 cleanup_task = monitor_db.CleanupTask.expect_new(host=self.host)
1750 _set_host_and_qe_ids(cleanup_task)
1751 self._dispatcher.add_agent.expect_call(IsAgentWithTask(cleanup_task))
1752
1753
1754 def test_gather_logs_reboot_hosts(self):
1755 self._setup_gather_logs_expects()
1756 self.job.reboot_after = models.RebootAfter.ALWAYS
1757 self._setup_gather_task_cleanup_expects()
1758
1759 self._run_gather_logs_task()
1760
1761
1762 def test_gather_logs_reboot_on_abort(self):
1763 self._setup_gather_logs_expects(hqe_aborted=True)
1764 self.job.reboot_after = models.RebootAfter.NEVER
1765 self._setup_gather_task_cleanup_expects()
1766
1767 self._run_gather_logs_task()
1768
1769
showarded2afea2009-07-07 20:54:07 +00001770 def _test_cleanup_task_helper(self, success, task_tag,
1771 use_queue_entry=False):
showardfa8629c2008-11-04 16:51:23 +00001772 if use_queue_entry:
1773 self.queue_entry.get_host.expect_call().and_return(self.host)
showard45ae8192008-11-05 19:32:53 +00001774 self.host.set_status.expect_call('Cleaning')
showardfa8629c2008-11-04 16:51:23 +00001775 if success:
showarded2afea2009-07-07 20:54:07 +00001776 self.setup_run_monitor(0, task_tag)
showardfa8629c2008-11-04 16:51:23 +00001777 self.host.set_status.expect_call('Ready')
1778 self.host.update_field.expect_call('dirty', 0)
1779 else:
showarded2afea2009-07-07 20:54:07 +00001780 self.setup_run_monitor(1, task_tag)
showard8fe93b52008-11-18 17:53:22 +00001781 if use_queue_entry and not self.queue_entry.meta_host:
1782 self.queue_entry.set_execution_subdir.expect_call()
showarded2afea2009-07-07 20:54:07 +00001783 self.queue_entry.execution_path.expect_call().and_return('tag')
showard170873e2009-01-07 00:22:26 +00001784 self._setup_move_logfile(include_destination=True)
showardfa8629c2008-11-04 16:51:23 +00001785
1786 if use_queue_entry:
showard45ae8192008-11-05 19:32:53 +00001787 task = monitor_db.CleanupTask(queue_entry=self.queue_entry)
showardfa8629c2008-11-04 16:51:23 +00001788 else:
showard45ae8192008-11-05 19:32:53 +00001789 task = monitor_db.CleanupTask(host=self.host)
showardfa8629c2008-11-04 16:51:23 +00001790 self.assertEquals(len(task.failure_tasks), 1)
1791 repair_task = task.failure_tasks[0]
1792 self.assert_(isinstance(repair_task, monitor_db.RepairTask))
1793 if use_queue_entry:
showard2fe3f1d2009-07-06 20:19:11 +00001794 self.assertEquals(repair_task.queue_entry, self.queue_entry)
showardfa8629c2008-11-04 16:51:23 +00001795
1796 self.run_task(task, success)
1797
1798 self.god.check_playback()
showard170873e2009-01-07 00:22:26 +00001799 self.assert_(set(task.cmd) >=
showarded2afea2009-07-07 20:54:07 +00001800 set([monitor_db._autoserv_path, '-p', '--cleanup', '-m',
1801 self.HOSTNAME, '-r',
1802 drone_manager.WORKING_DIRECTORY]))
showard87ba02a2009-04-20 19:37:32 +00001803 if use_queue_entry:
1804 self.assertTrue(set(task.cmd) >= self.JOB_AUTOSERV_PARAMS)
showardfa8629c2008-11-04 16:51:23 +00001805
showard45ae8192008-11-05 19:32:53 +00001806 def test_cleanup_task(self):
showarded2afea2009-07-07 20:54:07 +00001807 self._test_cleanup_task_helper(True, '1-cleanup')
1808 self._test_cleanup_task_helper(False, '2-cleanup')
showardfa8629c2008-11-04 16:51:23 +00001809
1810
showard45ae8192008-11-05 19:32:53 +00001811 def test_cleanup_task_with_queue_entry(self):
showarded2afea2009-07-07 20:54:07 +00001812 self._test_cleanup_task_helper(False, '1-cleanup', True)
showardfa8629c2008-11-04 16:51:23 +00001813
1814
showard5add1c82009-05-26 19:27:46 +00001815 def test_recovery_queue_task_aborted_early(self):
showarded2afea2009-07-07 20:54:07 +00001816 # abort a recovery QueueTask right after it's created
showard5add1c82009-05-26 19:27:46 +00001817 self.god.stub_class_method(monitor_db.QueueTask, '_log_abort')
1818 self.god.stub_class_method(monitor_db.QueueTask, '_finish_task')
1819 run_monitor = self.god.create_mock_class(monitor_db.PidfileRunMonitor,
1820 'run_monitor')
1821
showarded2afea2009-07-07 20:54:07 +00001822 self.queue_entry.execution_path.expect_call().and_return('tag')
showard5add1c82009-05-26 19:27:46 +00001823 run_monitor.kill.expect_call()
1824 run_monitor.has_process.expect_call().and_return(True)
1825 monitor_db.QueueTask._log_abort.expect_call()
1826 monitor_db.QueueTask._finish_task.expect_call()
1827
showarded2afea2009-07-07 20:54:07 +00001828 task = monitor_db.QueueTask(self.job, [self.queue_entry],
1829 recover_run_monitor=run_monitor)
showard5add1c82009-05-26 19:27:46 +00001830 task.abort()
1831 self.assert_(task.aborted)
1832 self.god.check_playback()
1833
1834
showard54c1ea92009-05-20 00:32:58 +00001835class HostTest(BaseSchedulerTest):
1836 def test_cmp_for_sort(self):
1837 expected_order = [
1838 'alice', 'Host1', 'host2', 'host3', 'host09', 'HOST010',
1839 'host10', 'host11', 'yolkfolk']
1840 hostname_idx = list(monitor_db.Host._fields).index('hostname')
1841 row = [None] * len(monitor_db.Host._fields)
1842 hosts = []
1843 for hostname in expected_order:
1844 row[hostname_idx] = hostname
1845 hosts.append(monitor_db.Host(row=row, new_record=True))
1846
1847 host1 = hosts[expected_order.index('Host1')]
1848 host010 = hosts[expected_order.index('HOST010')]
1849 host10 = hosts[expected_order.index('host10')]
1850 host3 = hosts[expected_order.index('host3')]
1851 alice = hosts[expected_order.index('alice')]
1852 self.assertEqual(0, monitor_db.Host.cmp_for_sort(host10, host10))
1853 self.assertEqual(1, monitor_db.Host.cmp_for_sort(host10, host010))
1854 self.assertEqual(-1, monitor_db.Host.cmp_for_sort(host010, host10))
1855 self.assertEqual(-1, monitor_db.Host.cmp_for_sort(host1, host10))
1856 self.assertEqual(-1, monitor_db.Host.cmp_for_sort(host1, host010))
1857 self.assertEqual(-1, monitor_db.Host.cmp_for_sort(host3, host10))
1858 self.assertEqual(-1, monitor_db.Host.cmp_for_sort(host3, host010))
1859 self.assertEqual(1, monitor_db.Host.cmp_for_sort(host3, host1))
1860 self.assertEqual(-1, monitor_db.Host.cmp_for_sort(host1, host3))
1861 self.assertEqual(-1, monitor_db.Host.cmp_for_sort(alice, host3))
1862 self.assertEqual(1, monitor_db.Host.cmp_for_sort(host3, alice))
1863 self.assertEqual(0, monitor_db.Host.cmp_for_sort(alice, alice))
1864
1865 hosts.sort(cmp=monitor_db.Host.cmp_for_sort)
1866 self.assertEqual(expected_order, [h.hostname for h in hosts])
1867
1868 hosts.reverse()
1869 hosts.sort(cmp=monitor_db.Host.cmp_for_sort)
1870 self.assertEqual(expected_order, [h.hostname for h in hosts])
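
    # The expected order suggests cmp_for_sort() is a case-insensitive natural
    # sort: embedded numbers compare by value ('host2' < 'host09' < 'host10'),
    # with plain string comparison apparently breaking numeric ties such as
    # 'HOST010' vs 'host10'.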
1871
1872
showardf1ae3542009-05-11 19:26:02 +00001873class HostQueueEntryTest(BaseSchedulerTest):
1874 def _create_hqe(self, dependency_labels=(), **create_job_kwargs):
1875 job = self._create_job(**create_job_kwargs)
1876 for label in dependency_labels:
1877 job.dependency_labels.add(label)
1878 hqes = list(monitor_db.HostQueueEntry.fetch(where='job_id=%d' % job.id))
1879 self.assertEqual(1, len(hqes))
1880 return hqes[0]
1881
showard77182562009-06-10 00:16:05 +00001882
showardf1ae3542009-05-11 19:26:02 +00001883 def _check_hqe_labels(self, hqe, expected_labels):
1884 expected_labels = set(expected_labels)
1885 label_names = set(label.name for label in hqe.get_labels())
1886 self.assertEqual(expected_labels, label_names)
1887
showard77182562009-06-10 00:16:05 +00001888
showardf1ae3542009-05-11 19:26:02 +00001889 def test_get_labels_empty(self):
1890 hqe = self._create_hqe(hosts=[1])
1891 labels = list(hqe.get_labels())
1892 self.assertEqual([], labels)
1893
showard77182562009-06-10 00:16:05 +00001894
showardf1ae3542009-05-11 19:26:02 +00001895 def test_get_labels_metahost(self):
1896 hqe = self._create_hqe(metahosts=[2])
1897 self._check_hqe_labels(hqe, ['label2'])
1898
showard77182562009-06-10 00:16:05 +00001899
showardf1ae3542009-05-11 19:26:02 +00001900     def test_get_labels_dependencies(self):
1901 hqe = self._create_hqe(dependency_labels=(self.label3, self.label4),
1902 metahosts=[1])
1903 self._check_hqe_labels(hqe, ['label1', 'label3', 'label4'])
1904
1905
showardb2e2c322008-10-14 17:33:55 +00001906class JobTest(BaseSchedulerTest):
showard2bab8f42008-11-12 18:15:22 +00001907 def setUp(self):
1908 super(JobTest, self).setUp()
showard170873e2009-01-07 00:22:26 +00001909 self.god.stub_with(
1910 drone_manager.DroneManager, 'attach_file_to_execution',
1911 mock.mock_function('attach_file_to_execution',
1912 default_return_val='/test/path/tmp/foo'))
showard2bab8f42008-11-12 18:15:22 +00001913
1914
showard77182562009-06-10 00:16:05 +00001915 def _test_pre_job_tasks_helper(self):
1916 """
1917         Calls HQE._do_run_pre_job_tasks() and returns the task list after
1918 confirming that the last task is the SetEntryPendingTask.
1919 """
1920 queue_entry = monitor_db.HostQueueEntry.fetch('id = 1').next()
1921 pre_job_agent = queue_entry._do_run_pre_job_tasks()
1922 self.assert_(isinstance(pre_job_agent, monitor_db.Agent))
1923 pre_job_tasks = list(pre_job_agent.queue.queue)
1924 self.assertTrue(isinstance(pre_job_tasks[-1],
1925 monitor_db.SetEntryPendingTask))
1926
1927 return pre_job_tasks
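
    # As exercised by the tests below, the pre-job agent appears to queue, in
    # order: an optional CleanupTask (depending on reboot_before and host
    # dirtiness), an optional VerifyTask (skipped when run_verify is off), and
    # always a SetEntryPendingTask as the final task.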
showard2bab8f42008-11-12 18:15:22 +00001928
1929
showarde58e3f82008-11-20 19:04:59 +00001930 def _test_run_helper(self, expect_agent=True, expect_starting=False,
1931 expect_pending=False):
1932 if expect_starting:
1933 expected_status = models.HostQueueEntry.Status.STARTING
1934 elif expect_pending:
1935 expected_status = models.HostQueueEntry.Status.PENDING
1936 else:
1937 expected_status = models.HostQueueEntry.Status.VERIFYING
showardb2e2c322008-10-14 17:33:55 +00001938 job = monitor_db.Job.fetch('id = 1').next()
1939 queue_entry = monitor_db.HostQueueEntry.fetch('id = 1').next()
showard77182562009-06-10 00:16:05 +00001940 assert queue_entry.job is job
1941 agent = job.run_if_ready(queue_entry)
showardb2e2c322008-10-14 17:33:55 +00001942
showard2bab8f42008-11-12 18:15:22 +00001943 self.god.check_playback()
showard77182562009-06-10 00:16:05 +00001944 actual_status = models.HostQueueEntry.smart_get(1).status
1945 self.assertEquals(expected_status, actual_status)
showard2bab8f42008-11-12 18:15:22 +00001946
showard9976ce92008-10-15 20:28:13 +00001947 if not expect_agent:
1948 self.assertEquals(agent, None)
1949 return
1950
showardb2e2c322008-10-14 17:33:55 +00001951 self.assert_(isinstance(agent, monitor_db.Agent))
1952 tasks = list(agent.queue.queue)
1953 return tasks
1954
1955
showardc9ae1782009-01-30 01:42:37 +00001956 def _check_verify_task(self, verify_task):
1957 self.assert_(isinstance(verify_task, monitor_db.VerifyTask))
1958 self.assertEquals(verify_task.queue_entry.id, 1)
1959
1960
1961 def _check_pending_task(self, pending_task):
1962 self.assert_(isinstance(pending_task, monitor_db.SetEntryPendingTask))
1963 self.assertEquals(pending_task._queue_entry.id, 1)
1964
1965
showard77182562009-06-10 00:16:05 +00001966 def test_run_if_ready_delays(self):
1967 # Also tests Job.run_with_ready_delay() on atomic group jobs.
1968 django_job = self._create_job(hosts=[5, 6], atomic_group=1)
1969 job = monitor_db.Job(django_job.id)
1970 self.assertEqual(1, job.synch_count)
1971 django_hqes = list(models.HostQueueEntry.objects.filter(job=job.id))
1972 self.assertEqual(2, len(django_hqes))
1973 self.assertEqual(2, django_hqes[0].atomic_group.max_number_of_machines)
1974
1975 def set_hqe_status(django_hqe, status):
1976 django_hqe.status = status
1977 django_hqe.save()
1978 monitor_db.HostQueueEntry(django_hqe.id).host.set_status(status)
1979
1980 # An initial state, our synch_count is 1
1981 set_hqe_status(django_hqes[0], models.HostQueueEntry.Status.VERIFYING)
1982 set_hqe_status(django_hqes[1], models.HostQueueEntry.Status.PENDING)
1983
1984 # So that we don't depend on the config file value during the test.
1985 self.assert_(scheduler_config.config
1986 .secs_to_wait_for_atomic_group_hosts is not None)
1987 self.god.stub_with(scheduler_config.config,
1988 'secs_to_wait_for_atomic_group_hosts', 123456)
1989
1990 # Get the pending one as a monitor_db.HostQueueEntry object.
1991 pending_hqe = monitor_db.HostQueueEntry(django_hqes[1].id)
1992 self.assert_(not job._delay_ready_task)
1993 self.assertTrue(job.is_ready())
1994
1995 # Ready with one pending, one verifying and an atomic group should
1996         # result in a DelayedCallTask to re-check if we're ready a while later.
1997 agent = job.run_if_ready(pending_hqe)
1998 self.assert_(job._delay_ready_task)
1999 self.assert_(isinstance(agent, monitor_db.Agent))
2000 tasks = list(agent.queue.queue)
2001 self.assertEqual(1, len(tasks))
2002 self.assert_(isinstance(tasks[0], monitor_db.DelayedCallTask))
2003 delay_task = tasks[0]
2004 self.assert_(not delay_task.is_done())
2005
2006 self.god.stub_function(job, 'run')
2007
2008 # Test that the DelayedCallTask's callback queued up above does the
2009 # correct thing and returns the Agent returned by job.run().
2010 job.run.expect_call(pending_hqe).and_return('Fake Agent')
2011 self.assertEqual('Fake Agent', delay_task._callback())
2012
2013 # A delay already exists, this must do nothing.
2014         # A delay already exists, so this must do nothing.
2015
2016 # Adjust the delay deadline so that enough time has passed.
2017 job._delay_ready_task.end_time = time.time() - 111111
2018 job.run.expect_call(pending_hqe).and_return('Forty two')
2019 # ...the delay_expired condition should cause us to call run()
2020 self.assertEqual('Forty two', job.run_with_ready_delay(pending_hqe))
2021
2022 # Adjust the delay deadline back so that enough time has not passed.
2023 job._delay_ready_task.end_time = time.time() + 111111
2024 self.assertEqual(None, job.run_with_ready_delay(pending_hqe))
2025
2026 set_hqe_status(django_hqes[0], models.HostQueueEntry.Status.PENDING)
2027 # Now max_number_of_machines HQEs are in pending state. Remaining
2028 # delay will now be ignored.
2029 job.run.expect_call(pending_hqe).and_return('Watermelon')
2030 # ...the over_max_threshold test should cause us to call run()
2031 self.assertEqual('Watermelon', job.run_with_ready_delay(pending_hqe))
2032
2033 other_hqe = monitor_db.HostQueueEntry(django_hqes[0].id)
2034 self.assertTrue(pending_hqe.job is other_hqe.job)
2035 # DBObject classes should reuse instances so these should be the same.
2036 self.assertEqual(job, other_hqe.job)
2037 self.assertEqual(other_hqe.job, pending_hqe.job)
2038 # Be sure our delay was not lost during the other_hqe construction.
2039 self.assert_(job._delay_ready_task)
2040 self.assertFalse(job._delay_ready_task.is_done())
2041 self.assertFalse(job._delay_ready_task.aborted)
2042
2043 # We want the real run() to be called below.
2044 self.god.unstub(job, 'run')
2045
2046 # We pass in the other HQE this time the same way it would happen
2047 # for real when one host finishes verifying and enters pending.
2048 agent = job.run_if_ready(other_hqe)
2049
2050 # The delayed task must be aborted by the actual run() call above.
2051 self.assertTrue(job._delay_ready_task.aborted)
2052 self.assertFalse(job._delay_ready_task.success)
2053 self.assertTrue(job._delay_ready_task.is_done())
2054
2055 # Check that job run() and _finish_run() were called by the above:
2056 tasks = list(agent.queue.queue)
2057 self.assertEqual(1, len(tasks))
2058 self.assert_(isinstance(tasks[0], monitor_db.QueueTask))
2059 # Requery these hqes in order to verify the status from the DB.
2060 django_hqes = list(models.HostQueueEntry.objects.filter(job=job.id))
2061 for entry in django_hqes:
2062 self.assertEqual(models.HostQueueEntry.Status.STARTING,
2063 entry.status)
2064
2065 # We're already running, but more calls to run_with_ready_delay can
2066         # continue to come in as straggler hosts enter Pending.  Make
2067 # sure we don't do anything.
2068 self.assertEqual(None, job.run_with_ready_delay(pending_hqe))
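
    # Summarizing the behaviour this test pins down (an inference from the
    # expectations above, not a restatement of the scheduler source):
    # run_if_ready() on a ready-but-not-full atomic group job queues a
    # DelayedCallTask whose callback re-invokes job.run(); while that delay is
    # pending, run_with_ready_delay() returns None unless the delay has
    # expired or max_number_of_machines entries are already Pending, in which
    # case run() is called immediately, and a real run() aborts any
    # outstanding delay task.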
2069
2070
2071 def test__atomic_and_has_started__on_atomic(self):
2072 self._create_job(hosts=[5, 6], atomic_group=1)
2073 job = monitor_db.Job.fetch('id = 1').next()
2074 self.assertFalse(job._atomic_and_has_started())
showardaf8b4ca2009-06-16 18:47:26 +00002075
showard77182562009-06-10 00:16:05 +00002076 self._update_hqe("status='Pending'")
2077 self.assertFalse(job._atomic_and_has_started())
2078 self._update_hqe("status='Verifying'")
2079 self.assertFalse(job._atomic_and_has_started())
showardaf8b4ca2009-06-16 18:47:26 +00002080 self.assertFalse(job._atomic_and_has_started())
2081 self._update_hqe("status='Failed'")
2082 self.assertFalse(job._atomic_and_has_started())
2083 self._update_hqe("status='Stopped'")
2084 self.assertFalse(job._atomic_and_has_started())
2085
showard77182562009-06-10 00:16:05 +00002086 self._update_hqe("status='Starting'")
2087 self.assertTrue(job._atomic_and_has_started())
2088 self._update_hqe("status='Completed'")
2089 self.assertTrue(job._atomic_and_has_started())
2090 self._update_hqe("status='Aborted'")
showard77182562009-06-10 00:16:05 +00002091
2092
2093 def test__atomic_and_has_started__not_atomic(self):
2094 self._create_job(hosts=[1, 2])
2095 job = monitor_db.Job.fetch('id = 1').next()
2096 self.assertFalse(job._atomic_and_has_started())
2097 self._update_hqe("status='Starting'")
2098 self.assertFalse(job._atomic_and_has_started())
2099
2100
showardb2e2c322008-10-14 17:33:55 +00002101 def test_run_asynchronous(self):
2102 self._create_job(hosts=[1, 2])
2103
showard77182562009-06-10 00:16:05 +00002104 tasks = self._test_pre_job_tasks_helper()
showardb2e2c322008-10-14 17:33:55 +00002105
showardc9ae1782009-01-30 01:42:37 +00002106 self.assertEquals(len(tasks), 2)
2107 verify_task, pending_task = tasks
2108 self._check_verify_task(verify_task)
2109 self._check_pending_task(pending_task)
showardb2e2c322008-10-14 17:33:55 +00002110
showardb2e2c322008-10-14 17:33:55 +00002111
showard9976ce92008-10-15 20:28:13 +00002112 def test_run_asynchronous_skip_verify(self):
2113 job = self._create_job(hosts=[1, 2])
2114 job.run_verify = False
2115 job.save()
2116
showard77182562009-06-10 00:16:05 +00002117 tasks = self._test_pre_job_tasks_helper()
showard9976ce92008-10-15 20:28:13 +00002118
2119 self.assertEquals(len(tasks), 1)
showardc9ae1782009-01-30 01:42:37 +00002120 pending_task = tasks[0]
2121 self._check_pending_task(pending_task)
showard9976ce92008-10-15 20:28:13 +00002122
2123
showardb2e2c322008-10-14 17:33:55 +00002124 def test_run_synchronous_verify(self):
2125 self._create_job(hosts=[1, 2], synchronous=True)
2126
showard77182562009-06-10 00:16:05 +00002127 tasks = self._test_pre_job_tasks_helper()
showardc9ae1782009-01-30 01:42:37 +00002128 self.assertEquals(len(tasks), 2)
2129 verify_task, pending_task = tasks
2130 self._check_verify_task(verify_task)
2131 self._check_pending_task(pending_task)
showardb2e2c322008-10-14 17:33:55 +00002132
2133
showard9976ce92008-10-15 20:28:13 +00002134 def test_run_synchronous_skip_verify(self):
2135 job = self._create_job(hosts=[1, 2], synchronous=True)
2136 job.run_verify = False
2137 job.save()
2138
showard77182562009-06-10 00:16:05 +00002139 tasks = self._test_pre_job_tasks_helper()
showardc9ae1782009-01-30 01:42:37 +00002140 self.assertEquals(len(tasks), 1)
2141 self._check_pending_task(tasks[0])
showard9976ce92008-10-15 20:28:13 +00002142
2143
showardb2e2c322008-10-14 17:33:55 +00002144 def test_run_synchronous_ready(self):
2145 self._create_job(hosts=[1, 2], synchronous=True)
showardd9ac4452009-02-07 02:04:37 +00002146 self._update_hqe("status='Pending', execution_subdir=''")
showardb2e2c322008-10-14 17:33:55 +00002147
showarde58e3f82008-11-20 19:04:59 +00002148 tasks = self._test_run_helper(expect_starting=True)
showardb2e2c322008-10-14 17:33:55 +00002149 self.assertEquals(len(tasks), 1)
2150 queue_task = tasks[0]
2151
2152 self.assert_(isinstance(queue_task, monitor_db.QueueTask))
2153 self.assertEquals(queue_task.job.id, 1)
2154 hqe_ids = [hqe.id for hqe in queue_task.queue_entries]
2155 self.assertEquals(hqe_ids, [1, 2])
2156
2157
showard77182562009-06-10 00:16:05 +00002158 def test_run_atomic_group_already_started(self):
2159 self._create_job(hosts=[5, 6], atomic_group=1, synchronous=True)
2160 self._update_hqe("status='Starting', execution_subdir=''")
2161
2162 job = monitor_db.Job.fetch('id = 1').next()
2163 queue_entry = monitor_db.HostQueueEntry.fetch('id = 1').next()
2164 assert queue_entry.job is job
2165 self.assertEqual(None, job.run(queue_entry))
2166
2167 self.god.check_playback()
2168
2169
showardf1ae3542009-05-11 19:26:02 +00002170 def test_run_synchronous_atomic_group_ready(self):
2171 self._create_job(hosts=[5, 6], atomic_group=1, synchronous=True)
2172 self._update_hqe("status='Pending', execution_subdir=''")
2173
2174 tasks = self._test_run_helper(expect_starting=True)
2175 self.assertEquals(len(tasks), 1)
2176 queue_task = tasks[0]
2177
2178 self.assert_(isinstance(queue_task, monitor_db.QueueTask))
showard77182562009-06-10 00:16:05 +00002179 # Atomic group jobs that do not depend on a specific label in the
2180 # atomic group will use the atomic group name as their group name.
showardf1ae3542009-05-11 19:26:02 +00002181 self.assertEquals(queue_task.group_name, 'atomic1')
2182
2183
2184 def test_run_synchronous_atomic_group_with_label_ready(self):
2185 job = self._create_job(hosts=[5, 6], atomic_group=1, synchronous=True)
2186 job.dependency_labels.add(self.label4)
2187 self._update_hqe("status='Pending', execution_subdir=''")
2188
2189 tasks = self._test_run_helper(expect_starting=True)
2190 self.assertEquals(len(tasks), 1)
2191 queue_task = tasks[0]
2192
2193 self.assert_(isinstance(queue_task, monitor_db.QueueTask))
2194 # Atomic group jobs that also specify a label in the atomic group
2195 # will use the label name as their group name.
2196 self.assertEquals(queue_task.group_name, 'label4')
2197
2198
showard21baa452008-10-21 00:08:39 +00002199 def test_reboot_before_always(self):
2200 job = self._create_job(hosts=[1])
showard0fc38302008-10-23 00:44:07 +00002201 job.reboot_before = models.RebootBefore.ALWAYS
showard21baa452008-10-21 00:08:39 +00002202 job.save()
2203
showard77182562009-06-10 00:16:05 +00002204 tasks = self._test_pre_job_tasks_helper()
showardc9ae1782009-01-30 01:42:37 +00002205 self.assertEquals(len(tasks), 3)
showard45ae8192008-11-05 19:32:53 +00002206 cleanup_task = tasks[0]
2207 self.assert_(isinstance(cleanup_task, monitor_db.CleanupTask))
2208 self.assertEquals(cleanup_task.host.id, 1)
showard21baa452008-10-21 00:08:39 +00002209
2210
2211 def _test_reboot_before_if_dirty_helper(self, expect_reboot):
2212 job = self._create_job(hosts=[1])
showard0fc38302008-10-23 00:44:07 +00002213 job.reboot_before = models.RebootBefore.IF_DIRTY
showard21baa452008-10-21 00:08:39 +00002214 job.save()
2215
showard77182562009-06-10 00:16:05 +00002216 tasks = self._test_pre_job_tasks_helper()
showardc9ae1782009-01-30 01:42:37 +00002217 self.assertEquals(len(tasks), expect_reboot and 3 or 2)
showard21baa452008-10-21 00:08:39 +00002218 if expect_reboot:
showard45ae8192008-11-05 19:32:53 +00002219 cleanup_task = tasks[0]
2220 self.assert_(isinstance(cleanup_task, monitor_db.CleanupTask))
2221 self.assertEquals(cleanup_task.host.id, 1)
showard21baa452008-10-21 00:08:39 +00002222
showard77182562009-06-10 00:16:05 +00002223
showard21baa452008-10-21 00:08:39 +00002224 def test_reboot_before_if_dirty(self):
2225 models.Host.smart_get(1).update_object(dirty=True)
2226 self._test_reboot_before_if_dirty_helper(True)
2227
2228
2229 def test_reboot_before_not_dirty(self):
2230 models.Host.smart_get(1).update_object(dirty=False)
2231 self._test_reboot_before_if_dirty_helper(False)
2232
2233
showardf1ae3542009-05-11 19:26:02 +00002234 def test_next_group_name(self):
2235 django_job = self._create_job(metahosts=[1])
2236 job = monitor_db.Job(id=django_job.id)
2237 self.assertEqual('group0', job._next_group_name())
2238
2239 for hqe in django_job.hostqueueentry_set.filter():
2240 hqe.execution_subdir = 'my_rack.group0'
2241 hqe.save()
2242 self.assertEqual('my_rack.group1', job._next_group_name('my/rack'))
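
    # From the data above, _next_group_name() appears to sanitize the group
    # argument (slashes become underscores) and then pick the next unused
    # groupN index among the job's existing execution_subdirs.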
2243
2244
2245class TopLevelFunctionsTest(unittest.TestCase):
mblighe7d9c602009-07-02 19:02:33 +00002246 def setUp(self):
2247 self.god = mock.mock_god()
2248
2249
2250 def tearDown(self):
2251 self.god.unstub_all()
2252
2253
showardf1ae3542009-05-11 19:26:02 +00002254 def test_autoserv_command_line(self):
2255 machines = 'abcd12,efgh34'
showardf1ae3542009-05-11 19:26:02 +00002256 extra_args = ['-Z', 'hello']
2257 expected_command_line = [monitor_db._autoserv_path, '-p',
showarded2afea2009-07-07 20:54:07 +00002258 '-m', machines, '-r',
2259 drone_manager.WORKING_DIRECTORY]
showardf1ae3542009-05-11 19:26:02 +00002260
showarded2afea2009-07-07 20:54:07 +00002261 command_line = monitor_db._autoserv_command_line(machines, extra_args)
showarde9c69362009-06-30 01:58:03 +00002262 self.assertEqual(expected_command_line + ['--verbose'] + extra_args,
2263 command_line)
showardf1ae3542009-05-11 19:26:02 +00002264
2265 class FakeJob(object):
2266 owner = 'Bob'
2267 name = 'fake job name'
mblighe7d9c602009-07-02 19:02:33 +00002268 id = 1337
2269
2270 class FakeHQE(object):
2271 job = FakeJob
showardf1ae3542009-05-11 19:26:02 +00002272
2273 command_line = monitor_db._autoserv_command_line(
showarded2afea2009-07-07 20:54:07 +00002274 machines, extra_args=[], queue_entry=FakeHQE, verbose=False)
showardf1ae3542009-05-11 19:26:02 +00002275 self.assertEqual(expected_command_line +
2276 ['-u', FakeJob.owner, '-l', FakeJob.name],
2277 command_line)
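
    # Spelled out, the two command lines asserted above are (with
    # _autoserv_path and WORKING_DIRECTORY abbreviated):
    #
    #   autoserv -p -m abcd12,efgh34 -r <WORKING_DIRECTORY> --verbose -Z hello
    #   autoserv -p -m abcd12,efgh34 -r <WORKING_DIRECTORY> -u Bob -l 'fake job name'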
2278
showard21baa452008-10-21 00:08:39 +00002279
showardce38e0c2008-05-29 19:36:16 +00002280if __name__ == '__main__':
jadmanski0afbb632008-06-06 21:10:57 +00002281 unittest.main()