"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import pexpect
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers used for common exceptions; these are not target-specific.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False

    def get_next_port(self):
        return 12000 + random.randint(0, 3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

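    # Illustrative usage sketch (not part of the original harness flow): a
    # derived test typically alternates between queueing expected packet
    # traffic on self.test_sequence and replaying it against the stub, e.g.:
    #
    #     self.reset_test_sequence()
    #     self.add_no_ack_remote_stream()
    #     context = self.expect_gdbremote_sequence()
    #     self.assertIsNotNone(context)
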
    def create_named_pipe(self):
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode.  This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Wrap the read side of the pipe in a file object.  Since the underlying
        # fd was opened with O_NONBLOCK, reads on this file object return
        # immediately whether or not the writer has connected yet.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
        # Wait for something to read with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

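    # Illustrative sketch of the named-pipe port handshake: when
    # init_llgs_test() or init_debugserver_test() below set up the pipe,
    # self.port is cleared to 0, the stub is launched with --named-pipe, and
    # launch_debug_monitor() reads back the port the stub actually chose:
    #
    #     self.init_llgs_test()
    #     server = self.launch_debug_monitor()
    #     # self.port now holds the stub's listening port.
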
    def init_llgs_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_lldb_gdbserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb_gdbserver exe not found")
        self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id())
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away,
        # then sends the real X notification when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-gdbserver shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        connect_info = ("localhost", self.port)
        # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1])
        sock.connect(connect_info)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def launch_debug_monitor(self, attach_pid=None):
        # Create the command line.
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid
        if self.named_pipe_path:
            commandline += " --named-pipe %s" % self.named_pipe_path

        # Start the server.
        server = pexpect.spawn(commandline)
        self.assertIsNotNone(server)
        server.expect(r"(debugserver|lldb-gdbserver)", timeout=10)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()
            # print "debug server listening on {}".format(self.port)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule the debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.close()
                except:
                    logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try not to collide with other ports,
        # and we retry up to a maximum number of times.
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Wait until we receive the server ready message before continuing.
            port_good = True
            try:
                server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))
            except:
                port_good = False
                server.close()

            if port_good:
                # Schedule the debug monitor to be shut down during teardown.
                logger = self.logger
                def shutdown_debug_monitor():
                    try:
                        server.close()
                    except:
                        logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
                self.addTearDownHook(shutdown_debug_monitor)

                # Create a socket to talk to the server.
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    # We should close the server here to be safe.
                    server.close()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before the next attempt, to avoid collisions.
            time.sleep(random.randint(1, 5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while.  We'll
        # have it sleep.
        exe_path = os.path.abspath("a.out")

        args = [exe_path]
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        return subprocess.Popen(args)

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach-to-pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream.  The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
        to the expected socket stream.  The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build the launch args.
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream.
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

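    # Illustrative sketch of how prep_debug_monitor_and_inferior() is normally
    # consumed by a derived test: extend the prepared stream, then replay it:
    #
    #     procs = self.prep_debug_monitor_and_inferior()
    #     self.assertIsNotNone(procs["server"])
    #     self.test_sequence.add_log_lines(["read packet: $vCont;c#00"], True)
    #     context = self.expect_gdbremote_sequence()
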
    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
    ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

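    # Illustrative sketch of the qProcessInfo round trip (the stubs report the
    # pid value as a hex string):
    #
    #     self.add_process_info_collection_packets()
    #     context = self.expect_gdbremote_sequence()
    #     process_info = self.parse_process_info_response(context)
    #     reported_pid = int(process_info["pid"], 16)
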
    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

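    # Illustrative sketch: gather_register_infos() (defined further below) is
    # the usual way to obtain reg_infos with lldb_register_index already
    # attached before looking up the pc entry:
    #
    #     reg_infos = self.gather_register_infos()
    #     (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_infos)
    #     self.assertIsNotNone(pc_lldb_reg_index)
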
    def add_lldb_register_index(self, reg_infos):
        """Add a "lldb_register_index" key containing the 0-based index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list.
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

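    # Illustrative sketch of a qMemoryRegionInfo check against some address
    # (code_address here is a hypothetical value captured earlier by the test):
    #
    #     self.reset_test_sequence()
    #     self.add_query_memory_region_packets(code_address)
    #     context = self.expect_gdbremote_sequence()
    #     mem_region = self.parse_memory_region_packet(context)
    #     self.assert_address_within_memory_region(code_address, mem_region)
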
    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

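    # Illustrative sketch: after the inferior has spawned its worker threads,
    # a test can block until the stub reports at least the expected count:
    #
    #     threads = self.wait_for_thread_count(2, timeout_seconds=5)
    #     self.assertTrue(len(threads) >= 2)
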
    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
            ], True)

        if do_continue:
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
            ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For entries of the form key=val, the dict key and value are stored as given.  For feature+, feature- and
        # feature?, the trailing +, - or ? is stripped from the key and stored as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is.
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element.
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

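    # Illustrative sketch of the qSupported handshake (PacketSize, when
    # reported, is a hex value per the gdb-remote protocol):
    #
    #     self.add_qSupported_packets()
    #     context = self.expect_gdbremote_sequence()
    #     supported = self.parse_qSupported_response(context)
    #     if "PacketSize" in supported:
    #         max_packet_size = int(supported["PacketSize"], 16)
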
    def run_process_then_stop(self, run_seconds=1):
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#00"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register.  Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register.  Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+", key):
                registers[int(key, 16)] = val
        return registers

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

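    # Worked example (sketch) of the decoding rules above: "}]" is an escaped
    # byte, decoding to chr(0x5d ^ 0x20) == "}"; "W*!" is run-length encoded,
    # where ord("!") - 29 == 4, so it decodes to "W" followed by four more "W"s:
    #
    #     self.assertEquals(self.decode_gdbremote_binary("}]"), "}")
    #     self.assertEquals(self.decode_gdbremote_binary("W*!"), "WWWWW")
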
    def build_auxv_dict(self, endian, word_size, auxv_data):
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off the key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off the value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle the ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} responses until an 'l' response (with or without data) is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next chunk of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move the offset along.
            offset += chunk_length

            # Figure out if we're done.  We're done if the response type is 'l'.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

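    # Illustrative sketch: the chunked reader pairs naturally with the auxv
    # helpers above.  The chunk size is arbitrary here, and endian/word_size
    # are assumed to have been gathered earlier (e.g. via qProcessInfo):
    #
    #     auxv_data = self.read_binary_data_in_chunks("qXfer:auxv:read::", 64)
    #     auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data)
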
    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
            "read packet: {}".format(chr(03)),
            # And wait for the stop notification.
            {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
            ], True)

    def parse_interrupt_packets(self, context):
        self.assertIsNotNone(context.get("stop_signo"))
        self.assertIsNotNone(context.get("stop_key_val_text"))
        return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))

    def add_QSaveRegisterState_packets(self, thread_id):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)

    def parse_QSaveRegisterState_response(self, context):
        self.assertIsNotNone(context)

        save_response = context.get("save_response")
        self.assertIsNotNone(save_response)

        if len(save_response) < 1 or save_response[0] == "E":
            # error received
            return (False, None)
        else:
            return (True, int(save_response))

    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)

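    # Illustrative sketch of a save/restore round trip for a thread_id captured
    # from an earlier stop notification:
    #
    #     self.reset_test_sequence()
    #     self.add_QSaveRegisterState_packets(thread_id)
    #     context = self.expect_gdbremote_sequence()
    #     (success, save_id) = self.parse_QSaveRegisterState_response(context)
    #     # ... clobber some registers, then restore:
    #     self.reset_test_sequence()
    #     self.add_QRestoreRegisterState_packets(save_id, thread_id)
    #     context = self.expect_gdbremote_sequence()
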
    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info.  We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify we received a response and unpack the existing register value.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s.
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded.  There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions.  Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written.  Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when we load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert the value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#00",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle the case of no vCont support at all - in which case the capture group will be None or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

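    # Illustrative sketch of checking vCont capabilities before relying on them:
    #
    #     self.reset_test_sequence()
    #     self.add_vCont_query_packets()
    #     context = self.expect_gdbremote_sequence()
    #     supported_vCont = self.parse_vCont_query_response(context)
    #     if "s" not in supported_vCont:
    #         self.skipTest("stub does not support vCont single stepping")
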
    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step tests that appear in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction.  We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                    ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true.  If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the allowed step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step tests that appear in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step tests that appear in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process.
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#00",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
             { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
               "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
             # Now stop the inferior.
             "read packet: {}".format(chr(03)),
             # And wait for the stop notification.
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Grab the main thread id.
        self.assertIsNotNone(context.get("stop_thread_id"))
        main_thread_id = int(context.get("stop_thread_id"), 16)

        # Grab the function address.
        self.assertIsNotNone(context.get("function_address"))
        function_address = int(context.get("function_address"), 16)

        # Grab the data addresses.
        self.assertIsNotNone(context.get("g_c1_address"))
        g_c1_address = int(context.get("g_c1_address"), 16)

        self.assertIsNotNone(context.get("g_c2_address"))
        g_c2_address = int(context.get("g_c2_address"), 16)

        # Set a breakpoint at the given address.
        # Note this might need to be switched per platform (ARM, mips, etc.).
        BREAKPOINT_KIND = 1
        self.reset_test_sequence()
        self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Remove the breakpoint.
        self.reset_test_sequence()
        self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify g_c1 and g_c2 match the expected initial state.
        args = {}
        args["g_c1_address"] = g_c1_address
        args["g_c2_address"] = g_c2_address
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"

        self.assertTrue(self.g_c1_c2_contents_are(args))

        # Verify we take only a small number of steps to hit the first state.  Might need to work through function entry prologue code.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)

        # Verify we hit the next state.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)