"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Start the inferior separately, attach to the inferior on the stub command line.
    _STARTUP_ATTACH = "attach"
    # Start the inferior separately, start the stub without attaching, and allow the test to attach to the inferior however it wants (e.g. $vAttach;pid).
    _STARTUP_ATTACH_MANUALLY = "attach_manually"
    # Start the stub, and launch the inferior with an $A packet via the initial packet stream.
    _STARTUP_LAUNCH = "launch"

    # Target-independent GDB signal numbers used for common exceptions.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False
        self.stub_hostname = "localhost"

    def get_next_port(self):
        return 12000 + random.randint(0, 3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def create_named_pipe(self):
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode.  This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Wrap the read fd in a file object.  Note that reads on it follow the
        # semantics of the non-blocking read side of a named pipe, which differ
        # from those of a pipe opened for blocking reads.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number itself.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
        # Wait for something to read, with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim the trailing null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

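    # Illustrative sketch (not executed by the tests) of how the named-pipe port
    # negotiation above fits together; everything other than the methods of this
    # class is hypothetical:
    #
    #   (pipe_path, pipe, pipe_fd) = self.create_named_pipe()   # self.port is now 0
    #   self.named_pipe_path, self.named_pipe, self.named_pipe_fd = pipe_path, pipe, pipe_fd
    #   # ... launch the stub with "--named-pipe", pipe_path on its command line ...
    #   self.port = self.get_stub_port_from_named_socket()      # port chosen by the stub
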
    def init_llgs_test(self, use_named_pipe=True):
        if lldb.remote_platform:
            # Remote platforms don't support named pipe based port negotiation.
            use_named_pipe = False

            platform = self.dbg.GetSelectedPlatform()

            shell_command = lldb.SBPlatformShellCommand("echo $PPID")
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('echo $PPID') failed: %s" % err)
            pid = shell_command.GetOutput().strip()

            shell_command = lldb.SBPlatformShellCommand("readlink /proc/%s/exe" % pid)
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('readlink /proc/%s/exe') failed: %s" % (pid, err))
            self.debug_monitor_exe = shell_command.GetOutput().strip()
            dname = self.dbg.GetSelectedPlatform().GetWorkingDirectory()
        else:
            self.debug_monitor_exe = get_lldb_server_exe()
            if not self.debug_monitor_exe:
                self.skipTest("lldb-server exe not found")
            dname = os.path.join(os.environ["LLDB_TEST"], os.environ["LLDB_SESSION_DIRNAME"])

        self.debug_monitor_extra_args = ["gdbserver", "-c", "log enable -T -f {}/process-{}.log lldb break process thread".format(dname, self.id()), "-c", "log enable -T -f {}/packets-{}.log gdb-remote packets".format(dname, self.id())]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = ["--log-file=/tmp/packets-{}.log".format(self._testMethodName), "--log-flags=0x800000"]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away,
        # then sends the real X notification when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # Send the kill packet so lldb-server shuts down gracefully.
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        triple = self.dbg.GetSelectedPlatform().GetTriple()
        if re.match(".*-.*-.*-android", triple):
            subprocess.call(["adb", "forward", "tcp:%d" % self.port, "tcp:%d" % self.port])
            def remove_port_forward():
                subprocess.call(["adb", "forward", "--remove", "tcp:%d" % self.port])

            self.addTearDownHook(remove_port_forward)

        connect_info = (self.stub_hostname, self.port)
        # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1])
        sock.connect(connect_info)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def set_inferior_startup_attach_manually(self):
        self._inferior_startup = self._STARTUP_ATTACH_MANUALLY

    def get_debug_monitor_command_line_args(self, attach_pid=None):
        commandline_args = self.debug_monitor_extra_args + ["localhost:{}".format(self.port)]
        if attach_pid:
            commandline_args += ["--attach=%d" % attach_pid]
        if self.named_pipe_path:
            commandline_args += ["--named-pipe", self.named_pipe_path]
        return commandline_args

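    # For illustration only (the port, pid, and paths below are made up): with the
    # lldb-server arguments set up by init_llgs_test(), the list built above yields
    # a command line along the lines of
    #   lldb-server gdbserver -c "log enable ..." -c "log enable ..." \
    #       localhost:12345 --attach=4242 --named-pipe /tmp/tmpXXXX/stub_port_number
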
    def run_platform_command(self, cmd):
        platform = self.dbg.GetSelectedPlatform()
        shell_command = lldb.SBPlatformShellCommand(cmd)
        err = platform.Run(shell_command)
        return (err, shell_command.GetOutput())

    def launch_debug_monitor(self, attach_pid=None, logfile=None):
        # Create the command line.
        commandline_args = self.get_debug_monitor_command_line_args(attach_pid=attach_pid)

        # Start the server.
        server = self.spawnSubprocess(self.debug_monitor_exe, commandline_args, install_remote=False)
        self.addTearDownHook(self.cleanupSubprocesses)
        self.assertIsNotNone(server)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule the debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.terminate()
                except:
                    logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try not to collide with other ports,
        # and we retry a bounded number of times.
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Schedule the debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.terminate()
                except:
                    logger.warning("failed to terminate server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Create a socket to talk to the server.
            try:
                self.sock = self.create_socket()
                return server
            except socket.error as serr:
                # We're only trying to handle connection refused.
                if serr.errno != errno.ECONNREFUSED:
                    raise serr
                # We should close the server here to be safe.
                server.terminate()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before the next attempt, to avoid collisions.
            time.sleep(random.randint(1, 5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3, exe_path=None):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while.  We'll
        # have it sleep.
        if not exe_path:
            exe_path = os.path.abspath("a.out")

        args = []
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        return self.spawnSubprocess(exe_path, args)

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3, inferior_exe_path=None):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach-to-pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream.  The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
        to the expected socket stream.  The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds, exe_path=inferior_exe_path)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            if self._inferior_startup == self._STARTUP_ATTACH:
                # In this case, we want the stub to attach via the command line, so set the command line attach pid here.
                attach_pid = inferior.pid

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args.
            if not inferior_exe_path:
                inferior_exe_path = os.path.abspath("a.out")

            if lldb.remote_platform:
                remote_work_dir = lldb.remote_platform.GetWorkingDirectory()
                remote_path = os.path.join(remote_work_dir, os.path.basename(inferior_exe_path))
                remote_file_spec = lldb.SBFileSpec(remote_path, False)
                err = lldb.remote_platform.Install(lldb.SBFileSpec(inferior_exe_path, True), remote_file_spec)
                if err.Fail():
                    raise Exception("remote_platform.Install('%s', '%s') failed: %s" % (inferior_exe_path, remote_path, err))
                inferior_exe_path = remote_path

            launch_args = [inferior_exe_path]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        # Build the expected protocol stream.
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

    def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds):
        response = ""
        timeout_time = time.time() + timeout_seconds

        while not expected_content_regex.match(response) and time.time() < timeout_time:
            can_read, _, _ = select.select([sock], [], [], timeout_seconds)
            if can_read and sock in can_read:
                recv_bytes = sock.recv(4096)
                if recv_bytes:
                    response += recv_bytes

        self.assertTrue(expected_content_regex.match(response))

    def expect_socket_send(self, sock, content, timeout_seconds):
        request_bytes_remaining = content
        timeout_time = time.time() + timeout_seconds

        while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
            _, can_write, _ = select.select([], [sock], [], timeout_seconds)
            if can_write and sock in can_write:
                written_byte_count = sock.send(request_bytes_remaining)
                request_bytes_remaining = request_bytes_remaining[written_byte_count:]
        self.assertEquals(len(request_bytes_remaining), 0)

    def do_handshake(self, stub_socket, timeout_seconds=5):
        # Write the ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

        # Send the start-no-ack-mode packet.
        NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
        bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
        self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST))

        # Receive the ack and "OK".
        self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)

        # Send the final ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

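    # The handshake above corresponds to this wire exchange (the reply checksum is
    # whatever the stub computes; "9a" is shown purely for illustration):
    #   test -> stub:  +
    #   test -> stub:  $QStartNoAckMode#b0
    #   stub -> test:  +$OK#9a
    #   test -> stub:  +
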
    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#e4",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#dc",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "triple",
        "vendor",
        "endian",
        "ptrsize"
        ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate that the keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

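    # Illustrative example of parse_process_info_response() (field values invented):
    # a raw qProcessInfo response such as
    #   "pid:2f67;parent-pid:2f5a;ostype:linux;endian:little;ptrsize:8;"
    # parses to
    #   {"pid": "2f67", "parent-pid": "2f5a", "ostype": "linux",
    #    "endian": "little", "ptrsize": "8"}
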
    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
        """Add a "lldb_register_index" key containing the 0-based index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list.
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

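    # A quick illustration of parse_key_val_dict() (the inputs are made up):
    #   parse_key_val_dict("start:1000;size:2000;permissions:rx")
    #     => {"start": "1000", "size": "2000", "permissions": "rx"}
    # and, with allow_dupes=True (the default), a repeated key is promoted to a list:
    #   parse_key_val_dict("k:1;k:2")
    #     => {"k": ["1", "2"]}
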
    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate that the keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#63",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
            ], True)

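    # As a concrete example of the packets built above (the address is made up):
    # add_set_breakpoint_packets(0x400a2c) has the test send "$Z0,400a2c,1#00"
    # (insert a software breakpoint of kind 1 at that address), and the matching
    # add_remove_breakpoint_packets(0x400a2c) sends "$z0,400a2c,1#00" to remove it.
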
    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For entries of the form key=val, the dict key and value are stored as given.  For feature+, feature-
        # and feature?, the +, - or ? is stripped from the key and stored as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is.
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element.
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

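    # For instance (an illustrative reply, not captured from a real stub), a
    # qSupported response of "PacketSize=20000;QStartNoAckMode+;qXfer:auxv:read+"
    # parses to {"PacketSize": "20000", "QStartNoAckMode": "+", "qXfer:auxv:read": "+"}.
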
    def run_process_then_stop(self, run_seconds=1):
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register.  Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register.  Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+$", key):
                registers[int(key, 16)] = val
        return registers

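    # Example (register numbers and values are invented): given the key/value part
    # of a stop reply such as "thread:1c8f;00:0102030405060708;07:f0e0d0c0b0a09080;",
    # extract_registers_from_stop_notification() keeps only the hex-numbered keys:
    #   {0: "0102030405060708", 7: "f0e0d0c0b0a09080"}
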
    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

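    # Two small worked examples of the decoding above (the inputs are illustrative):
    #   "}]"  decodes to chr(ord("]") ^ 0x20) == "}"             (escaped literal '}')
    #   "0* " decodes to "0" plus (ord(" ") - 29) == 3 more "0"s, i.e. "0000"
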
    def build_auxv_dict(self, endian, word_size, auxv_data):
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off the key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off the value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert the raw text from target endianness.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle the terminating entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

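    # Sketch of the expected auxv layout (the word size and values here are
    # illustrative): with word_size=8 and little-endian data, each 16-byte record
    # is a (key, value) pair of 8-byte integers, terminated by a (0, 0) record.
    # A record for AT_PAGESZ (key 6) with value 4096 would therefore contribute
    #   build_auxv_dict("little", 8, data)[6] == 4096
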
    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} responses until an 'l' response (the final chunk, possibly with data) is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done.  We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
            "read packet: {}".format(chr(03)),
            # And wait for the stop notification.
            {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
            ], True)

    def parse_interrupt_packets(self, context):
        self.assertIsNotNone(context.get("stop_signo"))
        self.assertIsNotNone(context.get("stop_key_val_text"))
        return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))

    def add_QSaveRegisterState_packets(self, thread_id):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)

    def parse_QSaveRegisterState_response(self, context):
        self.assertIsNotNone(context)

        save_response = context.get("save_response")
        self.assertIsNotNone(save_response)

        if len(save_response) < 1 or save_response[0] == "E":
            # An error was received.
            return (False, None)
        else:
            return (True, int(save_response))

    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)

    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info.  We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s.
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded.  There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions.  Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written.  Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when we load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert the value from target endianness to an integer.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#49",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle the case of no vCont support at all - in which case the capture group will be None or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

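    # For example, a stub that answers vCont? with "$vCont;c;C;s;S#..." yields a
    # captured ";c;C;s;S", which parse_vCont_query_response() turns into
    #   {"c": 1, "C": 1, "s": 1, "S": 1}
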
    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction.  We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true.  If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

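    # To illustrate the {thread} substitution above (the thread id is made up):
    # with thread_id=0x1a2b, step_instruction="vCont;s:{thread}" produces
    # "read packet: $vCont;s:1a2b#00", while the default step_instruction="s"
    # simply produces "read packet: $s#00".
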
    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process.
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#63",
             # Match the output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
             { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
               "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
             # Now stop the inferior.
             "read packet: {}".format(chr(03)),
             # And wait for the stop notification.
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Grab the main thread id.
        self.assertIsNotNone(context.get("stop_thread_id"))
        main_thread_id = int(context.get("stop_thread_id"), 16)

        # Grab the function address.
        self.assertIsNotNone(context.get("function_address"))
        function_address = int(context.get("function_address"), 16)

        # Grab the data addresses.
        self.assertIsNotNone(context.get("g_c1_address"))
        g_c1_address = int(context.get("g_c1_address"), 16)

        self.assertIsNotNone(context.get("g_c2_address"))
        g_c2_address = int(context.get("g_c2_address"), 16)

        # Set a breakpoint at the given address.
        # Note this might need to be switched per platform (ARM, mips, etc.).
        BREAKPOINT_KIND = 1
        self.reset_test_sequence()
        self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Remove the breakpoint.
        self.reset_test_sequence()
        self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify g_c1 and g_c2 match the expected initial state.
        args = {}
        args["g_c1_address"] = g_c1_address
        args["g_c2_address"] = g_c2_address
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"

        self.assertTrue(self.g_c1_c2_contents_are(args))

        # Verify we take only a small number of steps to hit the first state.  Might need to work through function entry prologue code.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)

        # Verify we hit the next state.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)