"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

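    # gdb-remote packets are framed as $<payload>#<checksum>, where <checksum> is the
    # modulo-256 sum of the payload bytes written as two hex digits.  For example, "k"
    # sums to 0x6b and "OK" sums to 0x4f + 0x4b = 0x9a, which is why the literals below
    # and in the expected packet streams end in #6b and #9a.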
    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Start the inferior separately, attach to the inferior on the stub command line.
    _STARTUP_ATTACH = "attach"
    # Start the inferior separately, start the stub without attaching, allow the test to attach to the inferior however it wants (e.g. $vAttach;pid).
    _STARTUP_ATTACH_MANUALLY = "attach_manually"
    # Start the stub, and launch the inferior with an $A packet via the initial packet stream.
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers used for common exceptions that are not target-specific.
    TARGET_EXC_BAD_ACCESS      = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC      = 0x93
    TARGET_EXC_EMULATION       = 0x94
    TARGET_EXC_SOFTWARE        = 0x95
    TARGET_EXC_BREAKPOINT      = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False
        self.stub_hostname = "localhost"

    def get_next_port(self):
        return 12000 + random.randint(0, 3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

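    # The named pipe is how the stub reports back the port it actually bound.  We pass the
    # pipe path on the stub command line (--named-pipe), set self.port to 0 so the stub picks
    # a free port itself, and the stub then writes that port number (NUL-terminated) into the
    # pipe, where get_stub_port_from_named_socket() reads it back.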
    def create_named_pipe(self):
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode.  This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Wrap the read side in a file object.  Note that reads through this object follow
        # the semantics of the non-blocking descriptor opened above, not those of a pipe
        # opened fresh for reading in blocking mode.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
        # Wait for something to read with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

    def init_llgs_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_lldb_gdbserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb_gdbserver exe not found")
        self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id())
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification
        # when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-gdbserver shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        connect_info = (self.stub_hostname, self.port)
        # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1])
        sock.connect(connect_info)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def set_inferior_startup_attach_manually(self):
        self._inferior_startup = self._STARTUP_ATTACH_MANUALLY

    def get_debug_monitor_command_line(self, attach_pid=None):
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid
        if self.named_pipe_path:
            commandline += " --named-pipe %s" % self.named_pipe_path
        return commandline

    def launch_debug_monitor(self, attach_pid=None, logfile=None):
        # Create the command line.
        import pexpect
        commandline = self.get_debug_monitor_command_line(attach_pid=attach_pid)

        # Start the server.
        server = pexpect.spawn(commandline, logfile=logfile)
        self.assertIsNotNone(server)
        server.expect(r"(debugserver|lldb-gdbserver)", timeout=10)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()
            # print "debug server listening on {}".format(self.port)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.close()
                except:
                    logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try not to collide with other ports,
        # and retry a max # times.
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Wait until we receive the server ready message before continuing.
            port_good = True
            try:
                server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))
            except:
                port_good = False
                server.close()

            if port_good:
                # Schedule debug monitor to be shut down during teardown.
                logger = self.logger
                def shutdown_debug_monitor():
                    try:
                        server.close()
                    except:
                        logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
                self.addTearDownHook(shutdown_debug_monitor)

                # Create a socket to talk to the server.
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    # We should close the server here to be safe.
                    server.close()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before next attempt, to avoid collisions.
            time.sleep(random.randint(1, 5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3, exe_path=None):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while.  We'll
        # have it sleep.
        if not exe_path:
            exe_path = os.path.abspath("a.out")

        args = [exe_path]
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        return subprocess.Popen(args)

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3, inferior_exe_path=None):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach-to-pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream.  The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
        to the expected socket stream.  The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds, exe_path=inferior_exe_path)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            if self._inferior_startup == self._STARTUP_ATTACH:
                # In this case, we want the stub to attach via the command line, so set the command line attach pid here.
                attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args.
            if not inferior_exe_path:
                inferior_exe_path = os.path.abspath("a.out")
            launch_args = [inferior_exe_path]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream.
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

    def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds):
        response = ""
        timeout_time = time.time() + timeout_seconds

        while not expected_content_regex.match(response) and time.time() < timeout_time:
            can_read, _, _ = select.select([sock], [], [], timeout_seconds)
            if can_read and sock in can_read:
                recv_bytes = sock.recv(4096)
                if recv_bytes:
                    response += recv_bytes

        self.assertTrue(expected_content_regex.match(response))

    def expect_socket_send(self, sock, content, timeout_seconds):
        request_bytes_remaining = content
        timeout_time = time.time() + timeout_seconds

        while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
            _, can_write, _ = select.select([], [sock], [], timeout_seconds)
            if can_write and sock in can_write:
                written_byte_count = sock.send(request_bytes_remaining)
                request_bytes_remaining = request_bytes_remaining[written_byte_count:]
        self.assertEquals(len(request_bytes_remaining), 0)

    def do_handshake(self, stub_socket, timeout_seconds=5):
        # Write the ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

        # Send the start no ack mode packet.
        NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
        bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
        self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST))

        # Receive the ack and "OK".
        self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)

        # Send the final ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

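    # By default each gdb-remote packet is acknowledged with a bare "+".  The expected stream
    # below mirrors that startup sequence: we ack, request $QStartNoAckMode#b0, expect the
    # stub's ack plus $OK#9a, and send a final ack, after which neither side sends acks.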
    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
        ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
509 """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

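    # Z0/z0 set and remove a software breakpoint: $Z0,<addr-hex>,<kind> inserts one at <addr>
    # and $z0,... removes it.  <kind> is target-specific (often the breakpoint instruction
    # length in bytes); the default of 1 used here may need adjusting per platform, as noted
    # in single_step_only_steps_one_instruction().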
    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
            ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
        ]

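    # Illustrative example: a qSupported response such as
    # "PacketSize=20000;QStartNoAckMode+;qXfer:auxv:read+" parses to
    # {"PacketSize": "20000", "QStartNoAckMode": "+", "qXfer:auxv:read": "+"}.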
    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected.  For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

    def run_process_then_stop(self, run_seconds=1):
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#00"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register.  Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register.  Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+", key):
                registers[int(key, 16)] = val
        return registers

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

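    # Binary payloads use two encodings: "}" escapes the next byte (decoded as byte ^ 0x20),
    # and "*" run-length-encodes the previous character, adding ord(next) - 29 extra copies.
    # For example, "0* " decodes to "0000" (ord(" ") - 29 == 3 additional zeros).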
    def decode_gdbremote_binary(self, encoded_bytes):
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

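    # The auxv block is a flat array of (key, value) pairs, each entry word_size bytes wide
    # and in target byte order, terminated by an AT_NULL entry whose key and value are both zero.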
    def build_auxv_dict(self, endian, word_size, auxv_data):
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

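    # Chunked reads follow the qXfer convention: each reply starts with "m" when more data
    # remains or "l" when this is the final (possibly empty) chunk, followed by the
    # binary-encoded payload that decode_gdbremote_binary() unpacks.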
    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} responses until an 'l' response (with or without data) is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done.  We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

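    # The interrupt is the single raw byte 0x03 (Ctrl-C), not a framed $...# packet; the stub
    # answers with a stop reply of the form $T<signo-hex><key:value;...>#<checksum>.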
    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
850 "read packet: {}".format(chr(03)),
851 # And wait for the stop notification.
852 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
853 ], True)
854
855 def parse_interrupt_packets(self, context):
856 self.assertIsNotNone(context.get("stop_signo"))
857 self.assertIsNotNone(context.get("stop_key_val_text"))
Todd Fiala9846d452014-06-20 17:39:24 +0000858 return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))
859
860 def add_QSaveRegisterState_packets(self, thread_id):
861 if thread_id:
862 # Use the thread suffix form.
863 request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
864 else:
865 request = "read packet: $QSaveRegisterState#00"
866
867 self.test_sequence.add_log_lines([
868 request,
869 {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
870 ], True)
871
872 def parse_QSaveRegisterState_response(self, context):
873 self.assertIsNotNone(context)
874
875 save_response = context.get("save_response")
876 self.assertIsNotNone(save_response)
877
878 if len(save_response) < 1 or save_response[0] == "E":
879 # error received
880 return (False, None)
881 else:
882 return (True, int(save_response))
883
884 def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
885 if thread_id:
886 # Use the thread suffix form.
887 request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
888 else:
889 request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)
890
891 self.test_sequence.add_log_lines([
892 request,
893 "send packet: $OK#00"
894 ], True)
895
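    # Register access uses $p<regnum-hex> to read and $P<regnum-hex>=<value> to write, with the
    # value given as target-endian hex bytes.  When the stub supports QThreadSuffixSupported, a
    # ";thread:<tid-hex>" suffix pins the request to a specific thread, as done below.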
    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info.  We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded.  There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions.  Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written.  Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine-enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when we load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#00",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction.  We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true.  If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#00",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
1129 { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
1130 "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
1131 # Now stop the inferior.
1132 "read packet: {}".format(chr(03)),
1133 # And wait for the stop notification.
1134 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
1135 True)
1136
1137 # Run the packet stream.
1138 context = self.expect_gdbremote_sequence()
1139 self.assertIsNotNone(context)
1140
1141 # Grab the main thread id.
1142 self.assertIsNotNone(context.get("stop_thread_id"))
1143 main_thread_id = int(context.get("stop_thread_id"), 16)
1144
1145 # Grab the function address.
1146 self.assertIsNotNone(context.get("function_address"))
1147 function_address = int(context.get("function_address"), 16)
1148
1149 # Grab the data addresses.
1150 self.assertIsNotNone(context.get("g_c1_address"))
1151 g_c1_address = int(context.get("g_c1_address"), 16)
1152
1153 self.assertIsNotNone(context.get("g_c2_address"))
1154 g_c2_address = int(context.get("g_c2_address"), 16)
1155
1156 # Set a breakpoint at the given address.
1157 # Note this might need to be switched per platform (ARM, mips, etc.).
1158 BREAKPOINT_KIND = 1
1159 self.reset_test_sequence()
1160 self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
1161 context = self.expect_gdbremote_sequence()
1162 self.assertIsNotNone(context)
1163
1164 # Remove the breakpoint.
1165 self.reset_test_sequence()
1166 self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
1167 context = self.expect_gdbremote_sequence()
1168 self.assertIsNotNone(context)
1169
1170 # Verify g_c1 and g_c2 match expected initial state.
1171 args = {}
1172 args["g_c1_address"] = g_c1_address
1173 args["g_c2_address"] = g_c2_address
1174 args["expected_g_c1"] = "0"
1175 args["expected_g_c2"] = "1"
1176
1177 self.assertTrue(self.g_c1_c2_contents_are(args))
1178
1179 # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code.
1180 args["expected_g_c1"] = "1"
1181 args["expected_g_c2"] = "1"
1182 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1183 self.assertTrue(state_reached)
1184
1185 # Verify we hit the next state.
1186 args["expected_g_c1"] = "1"
1187 args["expected_g_c2"] = "0"
1188 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1189 self.assertTrue(state_reached)
1190 self.assertEquals(step_count, 1)
1191
1192 # Verify we hit the next state.
1193 args["expected_g_c1"] = "0"
1194 args["expected_g_c2"] = "0"
1195 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1196 self.assertTrue(state_reached)
1197 self.assertEquals(step_count, 1)
1198
1199 # Verify we hit the next state.
1200 args["expected_g_c1"] = "0"
1201 args["expected_g_c2"] = "1"
1202 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1203 self.assertTrue(state_reached)
1204 self.assertEquals(step_count, 1)
Todd Fialaaf245d12014-06-30 21:05:18 +00001205