"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"
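    # Note on packet framing: a gdb-remote packet is "$<payload>#<checksum>", where
    # the checksum is the modulo-256 sum of the payload bytes, written as two hex
    # digits. For the kill packet above the payload is the single byte 'k' (0x6b),
    # hence the "#6b" suffix.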

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Start the inferior separately, attach to the inferior on the stub command line.
    _STARTUP_ATTACH = "attach"
    # Start the inferior separately, start the stub without attaching, allow the test to attach to the inferior however it wants (e.g. $vAttach;pid).
    _STARTUP_ATTACH_MANUALLY = "attach_manually"
    # Start the stub, and launch the inferior with an $A packet via the initial packet stream.
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers used for common exceptions; these are not target-specific.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False
        self.stub_hostname = "localhost"

    def get_next_port(self):
        return 12000 + random.randint(0,3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def create_named_pipe(self):
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode. This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Wrap the read side of the pipe in a file object. Because the underlying
        # fd was opened with O_NONBLOCK, reads through this file object follow
        # non-blocking named-pipe semantics.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"
                pass

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)
                pass

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))
                pass

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
        # Wait for something to read with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

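    # Note on the named-pipe handoff implemented by create_named_pipe() and
    # get_stub_port_from_named_socket() above: the stub is expected to write the TCP
    # port it is listening on into the pipe as a null-terminated decimal string.
    # For example (illustrative value only), a pipe payload of "56789\0" parses to
    # the integer port 56789.
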
    def init_llgs_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_lldb_gdbserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb_gdbserver exe not found")
        self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id())
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification
        # when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-gdbserver shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        connect_info = (self.stub_hostname, self.port)
        # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1])
        sock.connect(connect_info)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def set_inferior_startup_attach_manually(self):
        self._inferior_startup = self._STARTUP_ATTACH_MANUALLY

    def get_debug_monitor_command_line(self, attach_pid=None):
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid
        if self.named_pipe_path:
            commandline += " --named-pipe %s" % self.named_pipe_path
        return commandline

    def launch_debug_monitor(self, attach_pid=None, logfile=None):
        # Create the command line.
        import pexpect
        commandline = self.get_debug_monitor_command_line(attach_pid=attach_pid)

        # Start the server.
        server = pexpect.spawn(commandline, logfile=logfile)
        self.assertIsNotNone(server)
        server.expect(r"(debugserver|lldb-gdbserver)", timeout=10)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()
            # print "debug server listening on {}".format(self.port)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger

            def shutdown_debug_monitor():
                try:
                    server.close()
                except:
                    logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try not to collide with other ports,
        # and retry a max # times.
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Wait until we receive the server ready message before continuing.
            port_good = True
            try:
                server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))
            except:
                port_good = False
                server.close()

            if port_good:
                # Schedule debug monitor to be shut down during teardown.
                logger = self.logger
                def shutdown_debug_monitor():
                    try:
                        server.close()
                    except:
                        logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
                self.addTearDownHook(shutdown_debug_monitor)

                # Create a socket to talk to the server
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    # We should close the server here to be safe.
                    server.close()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before next attempt, to avoid collisions.
            time.sleep(random.randint(1,5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while. We'll
        # have it sleep.
        exe_path = os.path.abspath("a.out")

        args = [exe_path]
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        return subprocess.Popen(args)

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach-to-pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream. The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
        to the expected socket stream. The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            if self._inferior_startup == self._STARTUP_ATTACH:
                # In this case, we want the stub to attach via the command line, so set the command line attach pid here.
                attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior if requested.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build the launch args.
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream.
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

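    # Illustrative sketch of how a derived test typically drives
    # prep_debug_monitor_and_inferior() above (all names below are methods of this
    # class; the exact packets a real test adds will differ):
    #
    #   procs = self.prep_debug_monitor_and_inferior()
    #   self.add_process_info_collection_packets()
    #   context = self.expect_gdbremote_sequence()
    #   process_info = self.parse_process_info_response(context)
    #
    # The returned dict also exposes the inferior Popen object (non-None in the
    # attach modes) and the pexpect server handle for direct inspection.
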
    def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds):
        response = ""
        timeout_time = time.time() + timeout_seconds

        while not expected_content_regex.match(response) and time.time() < timeout_time:
            can_read, _, _ = select.select([sock], [], [], timeout_seconds)
            if can_read and sock in can_read:
                recv_bytes = sock.recv(4096)
                if recv_bytes:
                    response += recv_bytes

        self.assertTrue(expected_content_regex.match(response))

    def expect_socket_send(self, sock, content, timeout_seconds):
        request_bytes_remaining = content
        timeout_time = time.time() + timeout_seconds

        while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
            _, can_write, _ = select.select([], [sock], [], timeout_seconds)
            if can_write and sock in can_write:
                written_byte_count = sock.send(request_bytes_remaining)
                request_bytes_remaining = request_bytes_remaining[written_byte_count:]
        self.assertEquals(len(request_bytes_remaining), 0)

    def do_handshake(self, stub_socket, timeout_seconds=5):
        # Write the ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

        # Send the start no ack mode packet.
        NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
        bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
        self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST))

        # Receive the ack and "OK"
        self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)

        # Send the final ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

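    # The handshake above produces this wire exchange (the checksums shown are the
    # real modulo-256 sums of the payload bytes):
    #   test -> stub: +                      (initial ack)
    #   test -> stub: $QStartNoAckMode#b0
    #   stub -> test: +$OK#9a                (ack plus the OK reply)
    #   test -> stub: +                      (final ack before no-ack mode begins)
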
    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

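    # Illustrative example for parse_process_info_response() above; the values are
    # made up. A qProcessInfo payload of
    #   pid:2f8a;parent-pid:1;ostype:linux;endian:little;ptrsize:8;
    # parses to {"pid": "2f8a", "parent-pid": "1", "ostype": "linux",
    # "endian": "little", "ptrsize": "8"}; values are kept as raw strings.
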
    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
        ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
        """Add a "lldb_register_index" key containing the 0-based index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

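    # Illustrative example for parse_key_val_dict() above (values are made up):
    # "start:3fff0000;size:2000;permissions:rx" parses to
    # {"start": "3fff0000", "size": "2000", "permissions": "rx"}. When allow_dupes
    # is True, a repeated key is promoted to a list of its values.
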
    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)

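    # Note on add_set_breakpoint_packets() above and add_remove_breakpoint_packets()
    # below: $Z0,<addr>,<kind> asks the stub to set a software breakpoint at <addr>
    # and $z0,<addr>,<kind> removes it. The kind field is target-specific (commonly
    # the breakpoint instruction length in bytes), which is why callers can override
    # breakpoint_kind.
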
    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
            ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
        ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

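    # Illustrative example for parse_qSupported_response() above (the PacketSize
    # value is made up): a response payload of
    #   PacketSize=20000;QStartNoAckMode+;qXfer:auxv:read+
    # parses to {"PacketSize": "20000", "QStartNoAckMode": "+",
    # "qXfer:auxv:read": "+"}; the trailing +/-/? of a flag-style feature is
    # stripped from the key and stored as its value.
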
    def run_process_then_stop(self, run_seconds=1):
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#00"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register. Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register. Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+", key):
                registers[int(key, 16)] = val
        return registers

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

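    # Worked examples for decode_gdbremote_binary() above:
    #   "}]"  -> "}"     (the byte after '}' is XORed with 0x20: 0x5d ^ 0x20 = 0x7d)
    #   "0* " -> "0000"  ('*' repeats the previous char ord(' ') - 29 = 3 more times)
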
    def build_auxv_dict(self, endian, word_size, auxv_data):
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

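    # Illustrative example for build_auxv_dict() above: the auxv blob is a sequence
    # of (key, value) machine-word pairs in target byte order, terminated by a pair
    # whose key is 0. With word_size=8 on a little-endian target, a hypothetical
    # single entry with key 6 (AT_PAGESZ) and value 0x1000, followed by the
    # terminating zero pair, yields {6: 4096}.
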
    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} responses until an 'l' response (possibly carrying data) is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done. We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
            "read packet: {}".format(chr(03)),
            # And wait for the stop notification.
            {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
            ], True)

    def parse_interrupt_packets(self, context):
        self.assertIsNotNone(context.get("stop_signo"))
        self.assertIsNotNone(context.get("stop_key_val_text"))
        return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))

    def add_QSaveRegisterState_packets(self, thread_id):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)

    def parse_QSaveRegisterState_response(self, context):
        self.assertIsNotNone(context)

        save_response = context.get("save_response")
        self.assertIsNotNone(save_response)

        if len(save_response) < 1 or save_response[0] == "E":
            # error received
            return (False, None)
        else:
            return (True, int(save_response))

    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)

    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info. We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine-enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when we load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#00",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

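    # Illustrative example for parse_vCont_query_response() above: if the stub
    # replies to $vCont? with the payload "vCont;c;C;s;S", the capture group holds
    # ";c;C;s;S" and the parsed result is {"c": 1, "C": 1, "s": 1, "S": 1}, i.e.
    # the set of supported vCont actions.
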
    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true. If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

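    # Note on count_single_steps_until_true() above: step_instruction is a template
    # in which "{thread}" is replaced with the hex thread id. For example, with a
    # thread id of 0x1c8f, a step_instruction of "vCont;s:{thread}" becomes the
    # expected packet "$vCont;s:1c8f#00" (the checksum is left as the 00 placeholder
    # used throughout these expected-packet strings).
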
    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#00",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
             { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
               "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
             # Now stop the inferior.
             "read packet: {}".format(chr(03)),
             # And wait for the stop notification.
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Grab the main thread id.
        self.assertIsNotNone(context.get("stop_thread_id"))
        main_thread_id = int(context.get("stop_thread_id"), 16)

        # Grab the function address.
        self.assertIsNotNone(context.get("function_address"))
        function_address = int(context.get("function_address"), 16)

        # Grab the data addresses.
        self.assertIsNotNone(context.get("g_c1_address"))
        g_c1_address = int(context.get("g_c1_address"), 16)

        self.assertIsNotNone(context.get("g_c2_address"))
        g_c2_address = int(context.get("g_c2_address"), 16)

        # Set a breakpoint at the given address.
        # Note this might need to be switched per platform (ARM, mips, etc.).
        BREAKPOINT_KIND = 1
        self.reset_test_sequence()
        self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Remove the breakpoint.
        self.reset_test_sequence()
        self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify g_c1 and g_c2 match expected initial state.
        args = {}
        args["g_c1_address"] = g_c1_address
        args["g_c2_address"] = g_c2_address
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"

        self.assertTrue(self.g_c1_c2_contents_are(args))

        # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)

        # Verify we hit the next state.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)