"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers that are not target-specific, used for common exceptions.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False
        self.stub_hostname = "localhost"

    def get_next_port(self):
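        """Return a random TCP port in the range 12000-15999 for the stub to listen on."""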
        return 12000 + random.randint(0,3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def create_named_pipe(self):
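        """Create a temporary named pipe (FIFO) the stub can use to report its listening port.

        Returns a (path, file object, file descriptor) tuple for the read side of the
        pipe and registers a teardown hook that closes and deletes it.
        """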
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode.  This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Wrap the read side in a file object.  Reads follow the semantics of the
        # underlying non-blocking fd: they return immediately, whether or not the
        # write side has produced any data yet.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
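        """Read the port number the stub wrote to the named pipe and return it as an int."""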
        # Wait for something to read with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

    def init_llgs_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_lldb_gdbserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb_gdbserver exe not found")
        self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id())
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification
        # when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def create_socket(self):
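        """Connect a TCP socket to the stub at (self.stub_hostname, self.port) and return it.

        A teardown hook is registered that sends the kill packet and closes the socket.
        """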
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # Send the kill packet so lldb-gdbserver shuts down gracefully.
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        connect_info = (self.stub_hostname, self.port)
        # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1])
        sock.connect(connect_info)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def get_debug_monitor_command_line(self, attach_pid=None):
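        """Build the command line used to launch the debug monitor stub.

        The result is the stub executable plus its extra logging arguments, the
        listening address, and optional --attach/--named-pipe arguments, e.g.
        (illustrative only) "debugserver <log args> localhost:12345 --attach=1234".
        """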
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid
        if self.named_pipe_path:
            commandline += " --named-pipe %s" % self.named_pipe_path
        return commandline

    def launch_debug_monitor(self, attach_pid=None, logfile=None):
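        """Spawn the debug monitor stub with pexpect and return the pexpect child.

        If a named pipe is in use, the stub's dynamically chosen listening port is
        read back from the pipe and stored in self.port.
        """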
        # Create the command line.
        import pexpect
        commandline = self.get_debug_monitor_command_line(attach_pid=attach_pid)

        # Start the server.
        server = pexpect.spawn(commandline, logfile=logfile)
        self.assertIsNotNone(server)
        server.expect(r"(debugserver|lldb-gdbserver)", timeout=10)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()
            # print "debug server listening on {}".format(self.port)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
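        """Launch the debug monitor stub and return its pexpect handle once self.sock is connected to it.

        With a named pipe the stub picks its own port, so a single launch suffices;
        otherwise random ports are retried up to MAX_ATTEMPTS times to avoid collisions.
        """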
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.close()
                except:
                    logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try not to collide with other ports,
        # and retry a maximum number of times.
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Wait until we receive the server ready message before continuing.
            port_good = True
            try:
                server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))
            except:
                port_good = False
                server.close()

            if port_good:
                # Schedule debug monitor to be shut down during teardown.
                logger = self.logger
                def shutdown_debug_monitor():
                    try:
                        server.close()
                    except:
                        logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
                self.addTearDownHook(shutdown_debug_monitor)

                # Create a socket to talk to the server.
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    # We should close the server here to be safe.
                    server.close()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before next attempt, to avoid collisions.
            time.sleep(random.randint(1,5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while.  We'll
        # have it sleep.
        exe_path = os.path.abspath("a.out")

        args = [exe_path]
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        return subprocess.Popen(args)

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is started first, then the
        debug monitor is started in attach-to-pid mode (using --attach on the stub
        command line), and the no-ack-mode setup is appended to the packet stream.
        The packet stream is not yet executed; it is ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is started first, then no-ack mode is set
        up on the expected packet stream, then the verified launch packets are added
        to the expected packet stream.  Again the packet stream is not yet executed,
        so more expected packet entries can still be added to it.

        The return value is:
        {"inferior":<inferior>, "server":<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args.
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream.
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

    def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds):
        response = ""
        timeout_time = time.time() + timeout_seconds

        while not expected_content_regex.match(response) and time.time() < timeout_time:
            can_read, _, _ = select.select([sock], [], [], timeout_seconds)
            if can_read and sock in can_read:
                recv_bytes = sock.recv(4096)
                if recv_bytes:
                    response += recv_bytes

        self.assertTrue(expected_content_regex.match(response))

    def expect_socket_send(self, sock, content, timeout_seconds):
        request_bytes_remaining = content
        timeout_time = time.time() + timeout_seconds

        while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
            _, can_write, _ = select.select([], [sock], [], timeout_seconds)
            if can_write and sock in can_write:
                written_byte_count = sock.send(request_bytes_remaining)
                request_bytes_remaining = request_bytes_remaining[written_byte_count:]
        self.assertEquals(len(request_bytes_remaining), 0)

    def do_handshake(self, stub_socket, timeout_seconds=5):
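        """Perform the initial gdb-remote handshake directly on stub_socket: ack the stub, request QStartNoAckMode, and ack the OK reply."""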
        # Write the ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

        # Send the start no ack mode packet.
        NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
        bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
        self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST))

        # Receive the ack and "OK".
        self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)

        # Send the final ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

    def add_no_ack_remote_stream(self):
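        """Append the standard no-ack-mode negotiation packets to the expected packet stream."""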
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
        ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
        """Add a "lldb_register_index" key containing the 0-based index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
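        """Parse "key:value;" pairs into a dict, e.g. "pid:1c;ostype:linux;" => {"pid":"1c", "ostype":"linux"}.

        When allow_dupes is True, repeated keys collect their values into a list;
        otherwise a duplicate key fails the test.
        """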
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list.
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if do_continue:
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
             ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
             ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
        ]

    def parse_qSupported_response(self, context):
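        """Parse the captured qSupported reply into a dict.

        key=value entries keep their value; for feature+, feature- and feature? entries
        the trailing +/-/? becomes the value, e.g. "PacketSize=20000;QStartNoAckMode+"
        => {"PacketSize":"20000", "QStartNoAckMode":"+"}.
        """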
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected.  For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is.
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element.
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

    def run_process_then_stop(self, run_seconds=1):
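        """Continue the inferior, let it run for run_seconds, then interrupt it and capture the stop reply."""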
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#00"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register.  Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register.  Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+", key):
                registers[int(key, 16)] = val
        return registers

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
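        """Decode gdb-remote binary payload escaping and run-length encoding.

        "}" escapes the following byte (xor 0x20), e.g. "}]" decodes to "}"; "*" repeats
        the previous character (repeat count is the next byte minus 29), e.g. "0* "
        decodes to "0000".
        """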
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

    def build_auxv_dict(self, endian, word_size, auxv_data):
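        """Convert raw auxv bytes into a {key: value} dict of word_size entries in the given endianness.

        Parsing stops at the terminating entry (key 0, value 0); the test fails if that
        terminator is missing.
        """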
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} responses until an 'l' response (with or without data) marks the final chunk."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done.  We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
            "read packet: {}".format(chr(03)),
            # And wait for the stop notification.
            {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
            ], True)

    def parse_interrupt_packets(self, context):
        self.assertIsNotNone(context.get("stop_signo"))
        self.assertIsNotNone(context.get("stop_key_val_text"))
        return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))

    def add_QSaveRegisterState_packets(self, thread_id):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)

    def parse_QSaveRegisterState_response(self, context):
        self.assertIsNotNone(context)

        save_response = context.get("save_response")
        self.assertIsNotNone(save_response)

        if len(save_response) < 1 or save_response[0] == "E":
            # Error received.
            return (False, None)
        else:
            return (True, int(save_response))

    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)

    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
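        """Read each register with p, write back its bitwise complement with P, then re-read to verify.

        Returns a (successful_writes, failed_writes) tuple.
        """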
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info.  We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Pull the register value out of the response.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s.
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded.  There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions.  Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written.  Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine-enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when loading reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#00",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction.  We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true.  If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process.
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#00",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
             { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
               "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
             # Now stop the inferior.
             "read packet: {}".format(chr(03)),
             # And wait for the stop notification.
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Grab the main thread id.
        self.assertIsNotNone(context.get("stop_thread_id"))
        main_thread_id = int(context.get("stop_thread_id"), 16)

        # Grab the function address.
        self.assertIsNotNone(context.get("function_address"))
        function_address = int(context.get("function_address"), 16)

        # Grab the data addresses.
        self.assertIsNotNone(context.get("g_c1_address"))
        g_c1_address = int(context.get("g_c1_address"), 16)

        self.assertIsNotNone(context.get("g_c2_address"))
        g_c2_address = int(context.get("g_c2_address"), 16)

        # Set a breakpoint at the given address.
        # Note this might need to be switched per platform (ARM, mips, etc.).
        BREAKPOINT_KIND = 1
        self.reset_test_sequence()
        self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Remove the breakpoint.
        self.reset_test_sequence()
        self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify g_c1 and g_c2 match expected initial state.
        args = {}
        args["g_c1_address"] = g_c1_address
        args["g_c2_address"] = g_c2_address
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"

        self.assertTrue(self.g_c1_c2_contents_are(args))

        # Verify we take only a small number of steps to hit the first state.  Might need to work through function entry prologue code.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)

        # Verify we hit the next state.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)