#!/usr/bin/python
#pylint: disable-msg=C0111

import datetime
import mox
import unittest

import common
from autotest_lib.client.common_lib import control_data
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import priorities
from autotest_lib.client.common_lib.cros import dev_server
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.frontend import setup_django_environment
from autotest_lib.frontend.afe import frontend_test_utils
from autotest_lib.frontend.afe import model_logic
from autotest_lib.frontend.afe import models
from autotest_lib.frontend.afe import rpc_interface
from autotest_lib.frontend.afe import rpc_utils
from autotest_lib.server import frontend
from autotest_lib.server import utils as server_utils
from autotest_lib.server.cros import provision
from autotest_lib.server.cros.dynamic_suite import constants
from autotest_lib.server.cros.dynamic_suite import control_file_getter
from autotest_lib.server.cros.dynamic_suite import frontend_wrappers

CLIENT = control_data.CONTROL_TYPE_NAMES.CLIENT
SERVER = control_data.CONTROL_TYPE_NAMES.SERVER

_hqe_status = models.HostQueueEntry.Status


class RpcInterfaceTest(unittest.TestCase,
                       frontend_test_utils.FrontendTestMixin):
    def setUp(self):
        self._frontend_common_setup()
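        # mock_god (from autotest_lib.client.common_lib.test_utils.mock)
        # supplies the stub_*/expect_call/check_playback helpers used in this
        # class to fake out AFE internals such as RetryingAFE.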
        self.god = mock.mock_god()


    def tearDown(self):
        self.god.unstub_all()
        self._frontend_common_teardown()
        global_config.global_config.reset_config_values()


    def test_validation(self):
        # non-number for a numeric field
        self.assertRaises(model_logic.ValidationError,
                          rpc_interface.add_atomic_group, name='foo',
                          max_number_of_machines='bar')
        # omit a required field
        self.assertRaises(model_logic.ValidationError, rpc_interface.add_label,
                          name=None)
        # violate uniqueness constraint
        self.assertRaises(model_logic.ValidationError, rpc_interface.add_host,
                          hostname='host1')


    def test_multiple_platforms(self):
        platform2 = models.Label.objects.create(name='platform2', platform=True)
        self.assertRaises(model_logic.ValidationError,
                          rpc_interface.label_add_hosts, id='platform2',
                          hosts=['host1', 'host2'])
        self.assertRaises(model_logic.ValidationError,
                          rpc_interface.host_add_labels,
                          id='host1', labels=['platform2'])
        # make sure the platform didn't get added
        platforms = rpc_interface.get_labels(
                host__hostname__in=['host1', 'host2'], platform=True)
        self.assertEquals(len(platforms), 1)
        self.assertEquals(platforms[0]['name'], 'myplatform')


    def _check_hostnames(self, hosts, expected_hostnames):
        self.assertEquals(set(host['hostname'] for host in hosts),
                          set(expected_hostnames))


    def test_get_hosts(self):
        hosts = rpc_interface.get_hosts()
        self._check_hostnames(hosts, [host.hostname for host in self.hosts])

        hosts = rpc_interface.get_hosts(hostname='host1')
        self._check_hostnames(hosts, ['host1'])
        host = hosts[0]
        self.assertEquals(sorted(host['labels']), ['label1', 'myplatform'])
        self.assertEquals(host['platform'], 'myplatform')
        self.assertEquals(host['atomic_group'], None)
        self.assertEquals(host['acls'], ['my_acl'])
        self.assertEquals(host['attributes'], {})


    def test_get_hosts_multiple_labels(self):
        hosts = rpc_interface.get_hosts(
                multiple_labels=['myplatform', 'label1'])
        self._check_hostnames(hosts, ['host1'])


    def test_get_hosts_exclude_only_if_needed(self):
        self.hosts[0].labels.add(self.label3)

        hosts = rpc_interface.get_hosts(hostname__in=['host1', 'host2'],
                                        exclude_only_if_needed_labels=True)
        self._check_hostnames(hosts, ['host2'])


    def test_get_hosts_exclude_atomic_group_hosts(self):
        hosts = rpc_interface.get_hosts(
                exclude_atomic_group_hosts=True,
                hostname__in=['host4', 'host5', 'host6'])
        self._check_hostnames(hosts, ['host4'])


    def test_get_hosts_exclude_both(self):
        self.hosts[0].labels.add(self.label3)

        hosts = rpc_interface.get_hosts(
                hostname__in=['host1', 'host2', 'host5'],
                exclude_only_if_needed_labels=True,
                exclude_atomic_group_hosts=True)
        self._check_hostnames(hosts, ['host2'])


    def test_job_keyvals(self):
        keyval_dict = {'mykey': 'myvalue'}
        job_id = rpc_interface.create_job(name='test',
                                          priority=priorities.Priority.DEFAULT,
                                          control_file='foo',
                                          control_type=CLIENT,
                                          hosts=['host1'],
                                          keyvals=keyval_dict)
        jobs = rpc_interface.get_jobs(id=job_id)
        self.assertEquals(len(jobs), 1)
        self.assertEquals(jobs[0]['keyvals'], keyval_dict)


    def test_test_retry(self):
        job_id = rpc_interface.create_job(name='flake',
                                          priority=priorities.Priority.DEFAULT,
                                          control_file='foo',
                                          control_type=CLIENT,
                                          hosts=['host1'],
                                          test_retry=10)
        jobs = rpc_interface.get_jobs(id=job_id)
        self.assertEquals(len(jobs), 1)
        self.assertEquals(jobs[0]['test_retry'], 10)


    def test_get_jobs_summary(self):
        job = self._create_job(hosts=xrange(1, 4))
        entries = list(job.hostqueueentry_set.all())
        entries[1].status = _hqe_status.FAILED
        entries[1].save()
        entries[2].status = _hqe_status.FAILED
        entries[2].aborted = True
        entries[2].save()

        # Mock up tko_rpc_interface.get_status_counts.
        self.god.stub_function_to_return(rpc_interface.tko_rpc_interface,
                                         'get_status_counts',
                                         None)

        job_summaries = rpc_interface.get_jobs_summary(id=job.id)
        self.assertEquals(len(job_summaries), 1)
        summary = job_summaries[0]
        self.assertEquals(summary['status_counts'], {'Queued': 1,
                                                     'Failed': 2})


    def _check_job_ids(self, actual_job_dicts, expected_jobs):
        self.assertEquals(
                set(job_dict['id'] for job_dict in actual_job_dicts),
                set(job.id for job in expected_jobs))


    def test_get_jobs_status_filters(self):
        HqeStatus = models.HostQueueEntry.Status
        def create_two_host_job():
            return self._create_job(hosts=[1, 2])
        def set_hqe_statuses(job, first_status, second_status):
            entries = job.hostqueueentry_set.all()
            entries[0].update_object(status=first_status)
            entries[1].update_object(status=second_status)

        queued = create_two_host_job()

        queued_and_running = create_two_host_job()
        set_hqe_statuses(queued_and_running, HqeStatus.QUEUED,
                         HqeStatus.RUNNING)

        running_and_complete = create_two_host_job()
        set_hqe_statuses(running_and_complete, HqeStatus.RUNNING,
                         HqeStatus.COMPLETED)

        complete = create_two_host_job()
        set_hqe_statuses(complete, HqeStatus.COMPLETED, HqeStatus.COMPLETED)

        started_but_inactive = create_two_host_job()
        set_hqe_statuses(started_but_inactive, HqeStatus.QUEUED,
                         HqeStatus.COMPLETED)

        parsing = create_two_host_job()
        set_hqe_statuses(parsing, HqeStatus.PARSING, HqeStatus.PARSING)

        self._check_job_ids(rpc_interface.get_jobs(not_yet_run=True), [queued])
        self._check_job_ids(rpc_interface.get_jobs(running=True),
                            [queued_and_running, running_and_complete,
                             started_but_inactive, parsing])
        self._check_job_ids(rpc_interface.get_jobs(finished=True), [complete])


    def test_get_jobs_type_filters(self):
        self.assertRaises(AssertionError, rpc_interface.get_jobs,
                          suite=True, sub=True)
        self.assertRaises(AssertionError, rpc_interface.get_jobs,
                          suite=True, standalone=True)
        self.assertRaises(AssertionError, rpc_interface.get_jobs,
                          standalone=True, sub=True)

        parent_job = self._create_job(hosts=[1])
        child_jobs = self._create_job(hosts=[1, 2],
                                      parent_job_id=parent_job.id)
        standalone_job = self._create_job(hosts=[1])

        self._check_job_ids(rpc_interface.get_jobs(suite=True), [parent_job])
        self._check_job_ids(rpc_interface.get_jobs(sub=True), [child_jobs])
        self._check_job_ids(rpc_interface.get_jobs(standalone=True),
                            [standalone_job])


    def _create_job_helper(self, **kwargs):
        return rpc_interface.create_job(name='test',
                                        priority=priorities.Priority.DEFAULT,
                                        control_file='control file',
                                        control_type=SERVER, **kwargs)


    def test_one_time_hosts(self):
        job = self._create_job_helper(one_time_hosts=['testhost'])
        host = models.Host.objects.get(hostname='testhost')
        self.assertEquals(host.invalid, True)
        self.assertEquals(host.labels.count(), 0)
        self.assertEquals(host.aclgroup_set.count(), 0)


    def test_create_job_duplicate_hosts(self):
        self.assertRaises(model_logic.ValidationError, self._create_job_helper,
                          hosts=[1, 1])


    def test_create_unrunnable_metahost_job(self):
        self.assertRaises(error.NoEligibleHostException,
                          self._create_job_helper, meta_hosts=['unused'])


    def test_create_hostless_job(self):
        job_id = self._create_job_helper(hostless=True)
        job = models.Job.objects.get(pk=job_id)
        queue_entries = job.hostqueueentry_set.all()
        self.assertEquals(len(queue_entries), 1)
        self.assertEquals(queue_entries[0].host, None)
        self.assertEquals(queue_entries[0].meta_host, None)
        self.assertEquals(queue_entries[0].atomic_group, None)


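    # The special-task fixture below creates two jobs against host1 plus
    # three VERIFY SpecialTasks in different states: task1 completed before
    # job 1, task2 active and tied to job 2's queue entry, task3 not yet run.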
    def _setup_special_tasks(self):
        host = self.hosts[0]

        job1 = self._create_job(hosts=[1])
        job2 = self._create_job(hosts=[1])

        entry1 = job1.hostqueueentry_set.all()[0]
        entry1.update_object(started_on=datetime.datetime(2009, 1, 2),
                             execution_subdir='host1')
        entry2 = job2.hostqueueentry_set.all()[0]
        entry2.update_object(started_on=datetime.datetime(2009, 1, 3),
                             execution_subdir='host1')

        self.task1 = models.SpecialTask.objects.create(
                host=host, task=models.SpecialTask.Task.VERIFY,
                time_started=datetime.datetime(2009, 1, 1),  # ran before job 1
                is_complete=True, requested_by=models.User.current_user())
        self.task2 = models.SpecialTask.objects.create(
                host=host, task=models.SpecialTask.Task.VERIFY,
                queue_entry=entry2,  # ran with job 2
                is_active=True, requested_by=models.User.current_user())
        self.task3 = models.SpecialTask.objects.create(
                host=host, task=models.SpecialTask.Task.VERIFY,
                requested_by=models.User.current_user())  # not yet run


    def test_get_special_tasks(self):
        self._setup_special_tasks()
        tasks = rpc_interface.get_special_tasks(host__hostname='host1',
                                                queue_entry__isnull=True)
        self.assertEquals(len(tasks), 2)
        self.assertEquals(tasks[0]['task'], models.SpecialTask.Task.VERIFY)
        self.assertEquals(tasks[0]['is_active'], False)
        self.assertEquals(tasks[0]['is_complete'], True)


    def test_get_latest_special_task(self):
        # a particular usage of get_special_tasks()
        self._setup_special_tasks()
        self.task2.time_started = datetime.datetime(2009, 1, 2)
        self.task2.save()

        tasks = rpc_interface.get_special_tasks(
                host__hostname='host1', task=models.SpecialTask.Task.VERIFY,
                time_started__isnull=False, sort_by=['-time_started'],
                query_limit=1)
        self.assertEquals(len(tasks), 1)
        self.assertEquals(tasks[0]['id'], 2)


    def _common_entry_check(self, entry_dict):
        self.assertEquals(entry_dict['host']['hostname'], 'host1')
        self.assertEquals(entry_dict['job']['id'], 2)


    def test_get_host_queue_entries_and_special_tasks(self):
        self._setup_special_tasks()

        host = self.hosts[0].id
        entries_and_tasks = (
                rpc_interface.get_host_queue_entries_and_special_tasks(host))

        paths = [entry['execution_path'] for entry in entries_and_tasks]
        self.assertEquals(paths, ['hosts/host1/3-verify',
                                  '2-autotest_system/host1',
                                  'hosts/host1/2-verify',
                                  '1-autotest_system/host1',
                                  'hosts/host1/1-verify'])

        verify2 = entries_and_tasks[2]
        self._common_entry_check(verify2)
        self.assertEquals(verify2['type'], 'Verify')
        self.assertEquals(verify2['status'], 'Running')
        self.assertEquals(verify2['execution_path'], 'hosts/host1/2-verify')

        entry2 = entries_and_tasks[1]
        self._common_entry_check(entry2)
        self.assertEquals(entry2['type'], 'Job')
        self.assertEquals(entry2['status'], 'Queued')
        self.assertEquals(entry2['started_on'], '2009-01-03 00:00:00')


    def test_view_invalid_host(self):
        # RPCs used by the View Host page should work for invalid hosts.
        self._create_job_helper(hosts=[1])
        host = self.hosts[0]
        host.delete()

        self.assertEquals(1, rpc_interface.get_num_hosts(hostname='host1',
                                                         valid_only=False))
        data = rpc_interface.get_hosts(hostname='host1', valid_only=False)
        self.assertEquals(1, len(data))

        self.assertEquals(1, rpc_interface.get_num_host_queue_entries(
                host__hostname='host1'))
        data = rpc_interface.get_host_queue_entries(host__hostname='host1')
        self.assertEquals(1, len(data))

        count = rpc_interface.get_num_host_queue_entries_and_special_tasks(
                host=host.id)
        self.assertEquals(1, count)
        data = rpc_interface.get_host_queue_entries_and_special_tasks(
                host=host.id)
        self.assertEquals(1, len(data))


    def test_reverify_hosts(self):
        hostname_list = rpc_interface.reverify_hosts(id__in=[1, 2])
        self.assertEquals(hostname_list, ['host1', 'host2'])
        tasks = rpc_interface.get_special_tasks()
        self.assertEquals(len(tasks), 2)
        self.assertEquals(set(task['host']['id'] for task in tasks),
                          set([1, 2]))

        task = tasks[0]
        self.assertEquals(task['task'], models.SpecialTask.Task.VERIFY)
        self.assertEquals(task['requested_by'], 'autotest_system')


    def test_repair_hosts(self):
        hostname_list = rpc_interface.repair_hosts(id__in=[1, 2])
        self.assertEquals(hostname_list, ['host1', 'host2'])
        tasks = rpc_interface.get_special_tasks()
        self.assertEquals(len(tasks), 2)
        self.assertEquals(set(task['host']['id'] for task in tasks),
                          set([1, 2]))

        task = tasks[0]
        self.assertEquals(task['task'], models.SpecialTask.Task.REPAIR)
        self.assertEquals(task['requested_by'], 'autotest_system')


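    # _modify_host_helper exercises the shard-routing cases for modify_host:
    # with both caller and host on the master no RPC is forwarded; with the
    # host on a shard the master forwards 'modify_host_local' to that shard;
    # when called on a shard the update is routed to the global AFE via
    # 'modify_host'.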
    def _modify_host_helper(self, on_shard=False, host_on_shard=False):
        shard_hostname = 'shard1'
        if on_shard:
            global_config.global_config.override_config_value(
                    'SHARD', 'shard_hostname', shard_hostname)

        host = models.Host.objects.all()[0]
        if host_on_shard:
            shard = models.Shard.objects.create(hostname=shard_hostname)
            host.shard = shard
            host.save()

        self.assertFalse(host.locked)

        self.god.stub_class_method(frontend.AFE, 'run')

        if host_on_shard and not on_shard:
            mock_afe = self.god.create_mock_class_obj(
                    frontend_wrappers.RetryingAFE, 'MockAFE')
            self.god.stub_with(frontend_wrappers, 'RetryingAFE', mock_afe)

            mock_afe2 = frontend_wrappers.RetryingAFE.expect_new(
                    server=shard_hostname, user=None)
            mock_afe2.run.expect_call(
                    'modify_host_local', id=host.id,
                    locked=True, lock_reason='_modify_host_helper lock',
                    lock_time=datetime.datetime(2015, 12, 15))
        elif on_shard:
            mock_afe = self.god.create_mock_class_obj(
                    frontend_wrappers.RetryingAFE, 'MockAFE')
            self.god.stub_with(frontend_wrappers, 'RetryingAFE', mock_afe)

            mock_afe2 = frontend_wrappers.RetryingAFE.expect_new(
                    server=server_utils.get_global_afe_hostname(), user=None)
            mock_afe2.run.expect_call(
                    'modify_host', id=host.id,
                    locked=True, lock_reason='_modify_host_helper lock',
                    lock_time=datetime.datetime(2015, 12, 15))

        rpc_interface.modify_host(id=host.id, locked=True,
                                  lock_reason='_modify_host_helper lock',
                                  lock_time=datetime.datetime(2015, 12, 15))

        host = models.Host.objects.get(pk=host.id)
        if on_shard:
            # modify_host on a shard does nothing but route the RPC to the
            # master.
            self.assertFalse(host.locked)
        else:
            self.assertTrue(host.locked)
        self.god.check_playback()


    def test_modify_host_on_master_host_on_master(self):
        """Call modify_host on the master for a host on the master."""
        self._modify_host_helper()


    def test_modify_host_on_master_host_on_shard(self):
        """Call modify_host on the master for a host on a shard."""
        self._modify_host_helper(host_on_shard=True)


    def test_modify_host_on_shard(self):
        """Call modify_host on a shard for a host on that shard."""
        self._modify_host_helper(on_shard=True, host_on_shard=True)


    def test_modify_hosts_on_master_host_on_shard(self):
        """Ensure calls to modify_hosts are correctly forwarded to shards."""
        host1 = models.Host.objects.all()[0]
        host2 = models.Host.objects.all()[1]

        shard1 = models.Shard.objects.create(hostname='shard1')
        host1.shard = shard1
        host1.save()

        shard2 = models.Shard.objects.create(hostname='shard2')
        host2.shard = shard2
        host2.save()

        self.assertFalse(host1.locked)
        self.assertFalse(host2.locked)

        mock_afe = self.god.create_mock_class_obj(frontend_wrappers.RetryingAFE,
                                                  'MockAFE')
        self.god.stub_with(frontend_wrappers, 'RetryingAFE', mock_afe)

        # The status of a host might differ between the master and a shard.
        # Filters are always applied on the master, so the host on the shard
        # will be affected no matter what its status is there.
        filters_to_use = {'status': 'Ready'}

        mock_afe2 = frontend_wrappers.RetryingAFE.expect_new(
                server='shard2', user=None)
        mock_afe2.run.expect_call(
                'modify_hosts_local',
                host_filter_data={'id__in': [shard1.id, shard2.id]},
                update_data={'locked': True,
                             'lock_reason': 'Testing forward to shard',
                             'lock_time': datetime.datetime(2015, 12, 15)})

        mock_afe1 = frontend_wrappers.RetryingAFE.expect_new(
                server='shard1', user=None)
        mock_afe1.run.expect_call(
                'modify_hosts_local',
                host_filter_data={'id__in': [shard1.id, shard2.id]},
                update_data={'locked': True,
                             'lock_reason': 'Testing forward to shard',
                             'lock_time': datetime.datetime(2015, 12, 15)})

        rpc_interface.modify_hosts(
                host_filter_data={'status': 'Ready'},
                update_data={'locked': True,
                             'lock_reason': 'Testing forward to shard',
                             'lock_time': datetime.datetime(2015, 12, 15)})

        host1 = models.Host.objects.get(pk=host1.id)
        self.assertTrue(host1.locked)
        host2 = models.Host.objects.get(pk=host2.id)
        self.assertTrue(host2.locked)
        self.god.check_playback()


    def test_delete_host(self):
        """Ensure an RPC is made to the shard when deleting a host on it."""
        host1 = models.Host.objects.all()[0]
        shard1 = models.Shard.objects.create(hostname='shard1')
        host1.shard = shard1
        host1.save()
        host1_id = host1.id

        mock_afe = self.god.create_mock_class_obj(frontend_wrappers.RetryingAFE,
                                                  'MockAFE')
        self.god.stub_with(frontend_wrappers, 'RetryingAFE', mock_afe)

        mock_afe1 = frontend_wrappers.RetryingAFE.expect_new(
                server='shard1', user=None)
        mock_afe1.run.expect_call('delete_host', id=host1.id)

        rpc_interface.delete_host(id=host1.id)

        self.assertRaises(models.Host.DoesNotExist,
                          models.Host.smart_get, host1_id)

        self.god.check_playback()


    def test_modify_label(self):
        label1 = models.Label.objects.all()[0]
        self.assertEqual(label1.invalid, 0)

        host2 = models.Host.objects.all()[1]
        shard1 = models.Shard.objects.create(hostname='shard1')
        host2.shard = shard1
        host2.labels.add(label1)
        host2.save()

        mock_afe = self.god.create_mock_class_obj(frontend_wrappers.RetryingAFE,
                                                  'MockAFE')
        self.god.stub_with(frontend_wrappers, 'RetryingAFE', mock_afe)

        mock_afe1 = frontend_wrappers.RetryingAFE.expect_new(
                server='shard1', user=None)
        mock_afe1.run.expect_call('modify_label', id=label1.id, invalid=1)

        rpc_interface.modify_label(label1.id, invalid=1)

        self.assertEqual(models.Label.objects.all()[0].invalid, 1)
        self.god.check_playback()


    def test_delete_label(self):
        label1 = models.Label.objects.all()[0]

        host2 = models.Host.objects.all()[1]
        shard1 = models.Shard.objects.create(hostname='shard1')
        host2.shard = shard1
        host2.labels.add(label1)
        host2.save()

        mock_afe = self.god.create_mock_class_obj(frontend_wrappers.RetryingAFE,
                                                  'MockAFE')
        self.god.stub_with(frontend_wrappers, 'RetryingAFE', mock_afe)

        mock_afe1 = frontend_wrappers.RetryingAFE.expect_new(
                server='shard1', user=None)
        mock_afe1.run.expect_call('delete_label', id=label1.id)

        rpc_interface.delete_label(id=label1.id)

        self.assertRaises(models.Label.DoesNotExist,
                          models.Label.smart_get, label1.id)
        self.god.check_playback()


    def test_get_image_for_job_with_keyval_build(self):
        keyval_dict = {'build': 'cool-image'}
        job_id = rpc_interface.create_job(name='test',
                                          priority=priorities.Priority.DEFAULT,
                                          control_file='foo',
                                          control_type=CLIENT,
                                          hosts=['host1'],
                                          keyvals=keyval_dict)
        job = models.Job.objects.get(id=job_id)
        self.assertIsNotNone(job)
        image = rpc_interface._get_image_for_job(job, True)
        self.assertEquals('cool-image', image)


    def test_get_image_for_job_with_keyval_builds(self):
        keyval_dict = {'builds': {'cros-version': 'cool-image'}}
        job_id = rpc_interface.create_job(name='test',
                                          priority=priorities.Priority.DEFAULT,
                                          control_file='foo',
                                          control_type=CLIENT,
                                          hosts=['host1'],
                                          keyvals=keyval_dict)
        job = models.Job.objects.get(id=job_id)
        self.assertIsNotNone(job)
        image = rpc_interface._get_image_for_job(job, True)
        self.assertEquals('cool-image', image)


    def test_get_image_for_job_with_control_build(self):
        CONTROL_FILE = """build='cool-image'
        """
        job_id = rpc_interface.create_job(name='test',
                                          priority=priorities.Priority.DEFAULT,
                                          control_file='foo',
                                          control_type=CLIENT,
                                          hosts=['host1'])
        job = models.Job.objects.get(id=job_id)
        self.assertIsNotNone(job)
        job.control_file = CONTROL_FILE
        image = rpc_interface._get_image_for_job(job, True)
        self.assertEquals('cool-image', image)


    def test_get_image_for_job_with_control_builds(self):
        CONTROL_FILE = """builds={'cros-version': 'cool-image'}
        """
        job_id = rpc_interface.create_job(name='test',
                                          priority=priorities.Priority.DEFAULT,
                                          control_file='foo',
                                          control_type=CLIENT,
                                          hosts=['host1'])
        job = models.Job.objects.get(id=job_id)
        self.assertIsNotNone(job)
        job.control_file = CONTROL_FILE
        image = rpc_interface._get_image_for_job(job, True)
        self.assertEquals('cool-image', image)


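# Unlike RpcInterfaceTest above, which relies on autotest's mock.mock_god()
# helpers, the tests below are written against mox (mox.MoxTestBase) to stub
# out the dev server, the control file getter and rpc_utils.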
class ExtraRpcInterfaceTest(mox.MoxTestBase,
                            frontend_test_utils.FrontendTestMixin):
    """Unit tests for functions originally in site_rpc_interface.py.

    @var _NAME: fake suite name.
    @var _BOARD: fake board to reimage.
    @var _BUILD: fake build with which to reimage.
    @var _PRIORITY: fake priority with which to reimage.
    """
    _NAME = 'name'
    _BOARD = 'link'
    _BUILD = 'link-release/R36-5812.0.0'
    _BUILDS = {provision.CROS_VERSION_PREFIX: _BUILD}
    _PRIORITY = priorities.Priority.DEFAULT
    _TIMEOUT = 24


    def setUp(self):
        super(ExtraRpcInterfaceTest, self).setUp()
        self._SUITE_NAME = rpc_interface.canonicalize_suite_name(
                self._NAME)
        self.dev_server = self.mox.CreateMock(dev_server.ImageServer)
        self._frontend_common_setup(fill_data=False)


    def tearDown(self):
        self._frontend_common_teardown()


    def _setupDevserver(self):
        self.mox.StubOutClassWithMocks(dev_server, 'ImageServer')
        dev_server.resolve(self._BUILD).AndReturn(self.dev_server)


    def _mockDevServerGetter(self, get_control_file=True):
        self._setupDevserver()
        if get_control_file:
            self.getter = self.mox.CreateMock(
                    control_file_getter.DevServerGetter)
            self.mox.StubOutWithMock(control_file_getter.DevServerGetter,
                                     'create')
            control_file_getter.DevServerGetter.create(
                    mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(self.getter)


    def _mockRpcUtils(self, to_return, control_file_substring=''):
        """Fake out the autotest rpc_utils module with a mockable class.

        @param to_return: the value that rpc_utils.create_job_common() should
                          be mocked out to return.
        @param control_file_substring: A substring that is expected to appear
                                       in the control file output string that
                                       is passed to create_job_common.
                                       Default: ''
        """
        download_started_time = constants.DOWNLOAD_STARTED_TIME
        payload_finished_time = constants.PAYLOAD_FINISHED_TIME
        self.mox.StubOutWithMock(rpc_utils, 'create_job_common')
        rpc_utils.create_job_common(
                mox.And(mox.StrContains(self._NAME),
                        mox.StrContains(self._BUILD)),
                priority=self._PRIORITY,
                timeout_mins=self._TIMEOUT*60,
                max_runtime_mins=self._TIMEOUT*60,
                control_type='Server',
                control_file=mox.And(mox.StrContains(self._BOARD),
                                     mox.StrContains(self._BUILD),
                                     mox.StrContains(control_file_substring)),
                hostless=True,
                keyvals=mox.And(mox.In(download_started_time),
                                mox.In(payload_finished_time))
                ).AndReturn(to_return)


    def testStageBuildFail(self):
        """Ensure that a failure to stage the desired build fails the RPC."""
        self._setupDevserver()

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndRaise(
                dev_server.DevServerException())
        self.mox.ReplayAll()
        self.assertRaises(error.StageControlFileFailure,
                          rpc_interface.create_suite_job,
                          name=self._NAME,
                          board=self._BOARD,
                          builds=self._BUILDS,
                          pool=None)


    def testGetControlFileFail(self):
        """Ensure that a failure to get a needed control file fails the RPC."""
        self._mockDevServerGetter()

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndReturn(True)

        self.getter.get_control_file_contents_by_name(
                self._SUITE_NAME).AndReturn(None)
        self.mox.ReplayAll()
        self.assertRaises(error.ControlFileEmpty,
                          rpc_interface.create_suite_job,
                          name=self._NAME,
                          board=self._BOARD,
                          builds=self._BUILDS,
                          pool=None)


    def testGetControlFileListFail(self):
        """Ensure that a failure to list control files fails the RPC."""
        self._mockDevServerGetter()

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndReturn(True)

        self.getter.get_control_file_contents_by_name(
                self._SUITE_NAME).AndRaise(error.NoControlFileList())
        self.mox.ReplayAll()
        self.assertRaises(error.NoControlFileList,
                          rpc_interface.create_suite_job,
                          name=self._NAME,
                          board=self._BOARD,
                          builds=self._BUILDS,
                          pool=None)


    def testBadNumArgument(self):
        """Ensure we handle bad values for the |num| argument."""
        self.assertRaises(error.SuiteArgumentException,
                          rpc_interface.create_suite_job,
                          name=self._NAME,
                          board=self._BOARD,
                          builds=self._BUILDS,
                          pool=None,
                          num='goo')
        self.assertRaises(error.SuiteArgumentException,
                          rpc_interface.create_suite_job,
                          name=self._NAME,
                          board=self._BOARD,
                          builds=self._BUILDS,
                          pool=None,
                          num=[])
        self.assertRaises(error.SuiteArgumentException,
                          rpc_interface.create_suite_job,
                          name=self._NAME,
                          board=self._BOARD,
                          builds=self._BUILDS,
                          pool=None,
                          num='5')


    def testCreateSuiteJobFail(self):
        """Ensure that failure to schedule the suite job fails the RPC."""
        self._mockDevServerGetter()

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndReturn(True)

        self.getter.get_control_file_contents_by_name(
                self._SUITE_NAME).AndReturn('f')

        self.dev_server.url().AndReturn('mox_url')
        self._mockRpcUtils(-1)
        self.mox.ReplayAll()
        self.assertEquals(
                rpc_interface.create_suite_job(name=self._NAME,
                                               board=self._BOARD,
                                               builds=self._BUILDS, pool=None),
                -1)


    def testCreateSuiteJobSuccess(self):
        """Ensure that a scheduled suite job results in a successful RPC."""
        self._mockDevServerGetter()

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndReturn(True)

        self.getter.get_control_file_contents_by_name(
                self._SUITE_NAME).AndReturn('f')

        self.dev_server.url().AndReturn('mox_url')
        job_id = 5
        self._mockRpcUtils(job_id)
        self.mox.ReplayAll()
        self.assertEquals(
                rpc_interface.create_suite_job(name=self._NAME,
                                               board=self._BOARD,
                                               builds=self._BUILDS,
                                               pool=None),
                job_id)


    def testCreateSuiteJobNoHostCheckSuccess(self):
        """Ensure success when the suite job skips the host check."""
        self._mockDevServerGetter()

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndReturn(True)

        self.getter.get_control_file_contents_by_name(
                self._SUITE_NAME).AndReturn('f')

        self.dev_server.url().AndReturn('mox_url')
        job_id = 5
        self._mockRpcUtils(job_id)
        self.mox.ReplayAll()
        self.assertEquals(
                rpc_interface.create_suite_job(name=self._NAME,
                                               board=self._BOARD,
                                               builds=self._BUILDS,
                                               pool=None, check_hosts=False),
                job_id)

    def testCreateSuiteIntegerNum(self):
        """Ensure an integer |num| is propagated into the control file."""
        self._mockDevServerGetter()

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndReturn(True)

        self.getter.get_control_file_contents_by_name(
                self._SUITE_NAME).AndReturn('f')

        self.dev_server.url().AndReturn('mox_url')
        job_id = 5
        self._mockRpcUtils(job_id, control_file_substring='num=17')
        self.mox.ReplayAll()
        self.assertEquals(
                rpc_interface.create_suite_job(name=self._NAME,
                                               board=self._BOARD,
                                               builds=self._BUILDS,
                                               pool=None,
                                               check_hosts=False,
                                               num=17),
                job_id)


    def testCreateSuiteJobControlFileSupplied(self):
        """Ensure we can supply the control file to create_suite_job."""
        self._mockDevServerGetter(get_control_file=False)

        self.dev_server.hostname = 'mox_url'
        self.dev_server.stage_artifacts(
                image=self._BUILD, artifacts=['test_suites']).AndReturn(True)
        self.dev_server.url().AndReturn('mox_url')
        job_id = 5
        self._mockRpcUtils(job_id)
        self.mox.ReplayAll()
        self.assertEquals(
                rpc_interface.create_suite_job(name='%s/%s' % (self._NAME,
                                                               self._BUILD),
                                               board=None,
                                               builds=self._BUILDS,
                                               pool=None,
                                               control_file='CONTROL FILE'),
                job_id)


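    # The records below imitate the serialized Job and HostQueueEntry payloads
    # a shard uploads to the master via shard_heartbeat; the tests that follow
    # feed them through _do_heartbeat_and_assert_response.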
    def _get_records_for_sending_to_master(self):
        return [{'control_file': 'foo',
                 'control_type': 1,
                 'created_on': datetime.datetime(2014, 8, 21),
                 'drone_set': None,
                 'email_list': '',
                 'max_runtime_hrs': 72,
                 'max_runtime_mins': 1440,
                 'name': 'dummy',
                 'owner': 'autotest_system',
                 'parse_failed_repair': True,
                 'priority': 40,
                 'reboot_after': 0,
                 'reboot_before': 1,
                 'run_reset': True,
                 'run_verify': False,
                 'synch_count': 0,
                 'test_retry': 10,
                 'timeout': 24,
                 'timeout_mins': 1440,
                 'id': 1
                }], [{
                 'aborted': False,
                 'active': False,
                 'complete': False,
                 'deleted': False,
                 'execution_subdir': '',
                 'finished_on': None,
                 'started_on': None,
                 'status': 'Queued',
                 'id': 1
                }]


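    # _do_heartbeat_and_assert_response drives rpc_interface.shard_heartbeat
    # with the given uploads and known ids, then checks that the jobs, hosts
    # and host queue entries handed back to the shard are exactly the ones
    # expected.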
    def _do_heartbeat_and_assert_response(self, shard_hostname='shard1',
                                          upload_jobs=(), upload_hqes=(),
                                          known_jobs=(), known_hosts=(),
                                          **kwargs):
        known_job_ids = [job.id for job in known_jobs]
        known_host_ids = [host.id for host in known_hosts]
        known_host_statuses = [host.status for host in known_hosts]

        retval = rpc_interface.shard_heartbeat(
                shard_hostname=shard_hostname,
                jobs=upload_jobs, hqes=upload_hqes,
                known_job_ids=known_job_ids, known_host_ids=known_host_ids,
                known_host_statuses=known_host_statuses)

        self._assert_shard_heartbeat_response(shard_hostname, retval,
                                              **kwargs)

        return shard_hostname


    def _assert_shard_heartbeat_response(self, shard_hostname, retval, jobs=[],
                                         hosts=[], hqes=[]):

        retval_hosts, retval_jobs = retval['hosts'], retval['jobs']

        expected_jobs = [
                (job.id, job.name, shard_hostname) for job in jobs]
        returned_jobs = [(job['id'], job['name'], job['shard']['hostname'])
                         for job in retval_jobs]
        self.assertEqual(returned_jobs, expected_jobs)

        expected_hosts = [(host.id, host.hostname) for host in hosts]
        returned_hosts = [(host['id'], host['hostname'])
                          for host in retval_hosts]
        self.assertEqual(returned_hosts, expected_hosts)

        retval_hqes = []
        for job in retval_jobs:
            retval_hqes += job['hostqueueentry_set']

        expected_hqes = [(hqe.id) for hqe in hqes]
        returned_hqes = [(hqe['id']) for hqe in retval_hqes]
        self.assertEqual(returned_hqes, expected_hqes)


    def _send_records_to_master_helper(
            self, jobs, hqes, shard_hostname='host1',
            exception_to_throw=error.UnallowedRecordsSentToMaster,
            aborted=False):
        job_id = rpc_interface.create_job(
                name='dummy',
                priority=self._PRIORITY,
                control_file='foo',
                control_type=SERVER,
                test_retry=10, hostless=True)
        job = models.Job.objects.get(pk=job_id)
        shard = models.Shard.objects.create(hostname='host1')
        job.shard = shard
        job.save()

        if aborted:
            job.hostqueueentry_set.update(aborted=True)
            job.shard = None
            job.save()

        hqe = job.hostqueueentry_set.all()[0]
        if not exception_to_throw:
            self._do_heartbeat_and_assert_response(
                    shard_hostname=shard_hostname,
                    upload_jobs=jobs, upload_hqes=hqes)
        else:
            self.assertRaises(
                    exception_to_throw,
                    self._do_heartbeat_and_assert_response,
                    shard_hostname=shard_hostname,
                    upload_jobs=jobs, upload_hqes=hqes)


    def testSendingRecordsToMaster(self):
        """Send records to the master and ensure they are persisted."""
        jobs, hqes = self._get_records_for_sending_to_master()
        hqes[0]['status'] = 'Completed'
        self._send_records_to_master_helper(
                jobs=jobs, hqes=hqes, exception_to_throw=None)

        # Check the entry was actually written to the db.
        self.assertEqual(models.HostQueueEntry.objects.all()[0].status,
                         'Completed')


    def testSendingRecordsToMasterAbortedOnMaster(self):
        """Send records for a job aborted on the master; they still persist."""
        jobs, hqes = self._get_records_for_sending_to_master()
        hqes[0]['status'] = 'Completed'
        self._send_records_to_master_helper(
                jobs=jobs, hqes=hqes, exception_to_throw=None, aborted=True)

        # Check the entry was actually written to the db.
        self.assertEqual(models.HostQueueEntry.objects.all()[0].status,
                         'Completed')


    def testSendingRecordsToMasterJobAssignedToDifferentShard(self):
        """Ensure records that belong to a different shard are rejected."""
        jobs, hqes = self._get_records_for_sending_to_master()
        models.Shard.objects.create(hostname='other_shard')
        self._send_records_to_master_helper(
                jobs=jobs, hqes=hqes, shard_hostname='other_shard')


    def testSendingRecordsToMasterJobHqeWithoutJob(self):
        """Ensure an hqe update without an update for its job is rejected."""
        _, hqes = self._get_records_for_sending_to_master()
        self._send_records_to_master_helper(
                jobs=[], hqes=hqes)


    def testSendingRecordsToMasterNotExistingJob(self):
        """Ensure an update for a non-existing job gets rejected."""
        jobs, hqes = self._get_records_for_sending_to_master()
        jobs[0]['id'] = 3

        self._send_records_to_master_helper(
                jobs=jobs, hqes=hqes)


    def _createShardAndHostWithLabel(self, shard_hostname='shard1',
                                     host_hostname='host1',
                                     label_name='board:lumpy'):
        label = models.Label.objects.create(name=label_name)

        shard = models.Shard.objects.create(hostname=shard_hostname)
        shard.labels.add(label)

        host = models.Host.objects.create(hostname=host_hostname, leased=False)
        host.labels.add(label)

        return shard, host, label


    def _createJobForLabel(self, label):
        job_id = rpc_interface.create_job(name='dummy', priority=self._PRIORITY,
                                          control_file='foo',
                                          control_type=CLIENT,
                                          meta_hosts=[label.name],
                                          dependencies=(label.name,))
        return models.Job.objects.get(id=job_id)


    def testShardHeartbeatFetchHostlessJob(self):
        """Create a hostless job and ensure it's not assigned to a shard."""
        shard1, host1, lumpy_label = self._createShardAndHostWithLabel(
                'shard1', 'host1', 'board:lumpy')

        label2 = models.Label.objects.create(name='bluetooth', platform=False)

        job1 = self._create_job(hostless=True)

        # Hostless jobs should be executed by the global scheduler.
        self._do_heartbeat_and_assert_response(hosts=[host1])


    def testShardRetrieveJobs(self):
        """Create jobs and retrieve them."""
        # should never be returned by heartbeat
        leased_host = models.Host.objects.create(hostname='leased_host',
                                                 leased=True)

        shard1, host1, lumpy_label = self._createShardAndHostWithLabel()
        shard2, host2, grumpy_label = self._createShardAndHostWithLabel(
                'shard2', 'host2', 'board:grumpy')

        leased_host.labels.add(lumpy_label)

        job1 = self._createJobForLabel(lumpy_label)

        job2 = self._createJobForLabel(grumpy_label)

        job_completed = self._createJobForLabel(lumpy_label)
        # Job is already being run, so don't sync it.
        job_completed.hostqueueentry_set.update(complete=True)
        job_completed.hostqueueentry_set.create(complete=False)

        job_active = self._createJobForLabel(lumpy_label)
        # Job is already started, so don't sync it.
        job_active.hostqueueentry_set.update(active=True)
        job_active.hostqueueentry_set.create(complete=False, active=False)

        self._do_heartbeat_and_assert_response(
                jobs=[job1], hosts=[host1], hqes=job1.hostqueueentry_set.all())

        self._do_heartbeat_and_assert_response(
                shard_hostname=shard2.hostname,
                jobs=[job2], hosts=[host2], hqes=job2.hostqueueentry_set.all())

        host3 = models.Host.objects.create(hostname='host3', leased=False)
        host3.labels.add(lumpy_label)

        self._do_heartbeat_and_assert_response(
                known_jobs=[job1], known_hosts=[host1], hosts=[host3])


    def testResendJobsAfterFailedHeartbeat(self):
        """Create jobs, retrieve them, fail on the client, fetch them again."""
        shard1, host1, lumpy_label = self._createShardAndHostWithLabel()

        job1 = self._createJobForLabel(lumpy_label)

        self._do_heartbeat_and_assert_response(
                jobs=[job1],
                hqes=job1.hostqueueentry_set.all(), hosts=[host1])

        # Make sure it's resubmitted by sending last_job=None again.
        self._do_heartbeat_and_assert_response(
                known_hosts=[host1],
                jobs=[job1], hqes=job1.hostqueueentry_set.all(), hosts=[])

        # Now it worked, make sure it's not sent again.
        self._do_heartbeat_and_assert_response(
                known_jobs=[job1], known_hosts=[host1])

        job1 = models.Job.objects.get(pk=job1.id)
        job1.hostqueueentry_set.all().update(complete=True)

        # Job is completed, make sure it's not sent again.
        self._do_heartbeat_and_assert_response(
                known_hosts=[host1])

        job2 = self._createJobForLabel(lumpy_label)

        # job2 was created later, so it should be returned now.
        self._do_heartbeat_and_assert_response(
                known_hosts=[host1],
                jobs=[job2], hqes=job2.hostqueueentry_set.all())

        self._do_heartbeat_and_assert_response(
                known_jobs=[job2], known_hosts=[host1])

        job2 = models.Job.objects.get(pk=job2.pk)
        job2.hostqueueentry_set.update(aborted=True)
        # Setting a job to a complete status will set the shard_id to None in
        # scheduler_models. We have to emulate that here, because we use Django
        # models in tests.
        job2.shard = None
        job2.save()

        self._do_heartbeat_and_assert_response(
                known_jobs=[job2], known_hosts=[host1],
                jobs=[job2],
                hqes=job2.hostqueueentry_set.all())

        models.Test.objects.create(name='platform_BootPerfServer:shard',
                                   test_type=1)
        self.mox.StubOutWithMock(server_utils, 'read_file')
        server_utils.read_file(mox.IgnoreArg()).AndReturn('')
        self.mox.ReplayAll()
        rpc_interface.delete_shard(hostname=shard1.hostname)

        self.assertRaises(
                models.Shard.DoesNotExist, models.Shard.objects.get,
                pk=shard1.id)

        job1 = models.Job.objects.get(pk=job1.id)
        lumpy_label = models.Label.objects.get(pk=lumpy_label.id)
        host1 = models.Host.objects.get(pk=host1.id)
        super_job = models.Job.objects.get(priority=priorities.Priority.SUPER)
        super_job_host = models.HostQueueEntry.objects.get(
                job_id=super_job.id)

        self.assertIsNone(job1.shard)
        self.assertEqual(len(lumpy_label.shard_set.all()), 0)
        self.assertIsNone(host1.shard)
        self.assertIsNotNone(super_job)
        self.assertEqual(super_job_host.host_id, host1.id)


    def testCreateListShard(self):
        """Retrieve a list of all shards."""
        lumpy_label = models.Label.objects.create(name='board:lumpy',
                                                  platform=True)
        stumpy_label = models.Label.objects.create(name='board:stumpy',
                                                   platform=True)
        peppy_label = models.Label.objects.create(name='board:peppy',
                                                  platform=True)

        shard_id = rpc_interface.add_shard(
                hostname='host1', labels='board:lumpy,board:stumpy')
        self.assertRaises(error.RPCException,
                          rpc_interface.add_shard,
                          hostname='host1', labels='board:lumpy,board:stumpy')
        self.assertRaises(model_logic.ValidationError,
                          rpc_interface.add_shard,
                          hostname='host1', labels='board:peppy')
        shard = models.Shard.objects.get(pk=shard_id)
        self.assertEqual(shard.hostname, 'host1')
        self.assertEqual(shard.labels.values_list('pk')[0], (lumpy_label.id,))
        self.assertEqual(shard.labels.values_list('pk')[1], (stumpy_label.id,))

        self.assertEqual(rpc_interface.get_shards(),
                         [{'labels': ['board:lumpy', 'board:stumpy'],
                           'hostname': 'host1',
                           'id': 1}])


    def testAddBoardsToShard(self):
        """Add boards to a given shard."""
        shard1, host1, lumpy_label = self._createShardAndHostWithLabel()
        stumpy_label = models.Label.objects.create(name='board:stumpy',
                                                   platform=True)
        shard_id = rpc_interface.add_board_to_shard(
                hostname='shard1', labels='board:stumpy')
        # Test that an exception is raised when the board label does not exist.
        self.assertRaises(models.Label.DoesNotExist,
                          rpc_interface.add_board_to_shard,
                          hostname='shard1', labels='board:test')
        # Test that an exception is raised when the board is already sharded.
        self.assertRaises(error.RPCException,
                          rpc_interface.add_board_to_shard,
                          hostname='shard1', labels='board:lumpy')
        shard = models.Shard.objects.get(pk=shard_id)
        self.assertEqual(shard.hostname, 'shard1')
        self.assertEqual(shard.labels.values_list('pk')[0], (lumpy_label.id,))
        self.assertEqual(shard.labels.values_list('pk')[1], (stumpy_label.id,))

        self.assertEqual(rpc_interface.get_shards(),
                         [{'labels': ['board:lumpy', 'board:stumpy'],
                           'hostname': 'shard1',
                           'id': 1}])


    def testResendHostsAfterFailedHeartbeat(self):
        """Check that the master accepts resent host records after a failure."""
        shard1, host1, lumpy_label = self._createShardAndHostWithLabel()

        # Send the host.
        self._do_heartbeat_and_assert_response(hosts=[host1])

        # Send it again because the previous one didn't persist correctly.
        self._do_heartbeat_and_assert_response(hosts=[host1])

        # Now it worked, make sure it isn't sent again.
        self._do_heartbeat_and_assert_response(known_hosts=[host1])


if __name__ == '__main__':
    unittest.main()