"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"
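    # A note on framing, for reference (standard gdb-remote behavior, not specific
    # to this class): a packet is "$" + payload + "#" + a two-hex-digit checksum,
    # where the checksum is the modulo-256 sum of the payload's ASCII bytes.
    # For the payload "k":
    #   >>> format(sum(ord(c) for c in "k") % 256, "02x")
    #   '6b'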

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Start the inferior separately, attach to the inferior on the stub command line.
    _STARTUP_ATTACH = "attach"
    # Start the inferior separately, start the stub without attaching, allow the test to attach to the inferior however it wants (e.g. $vAttach;pid).
    _STARTUP_ATTACH_MANUALLY = "attach_manually"
    # Start the stub, and launch the inferior with an $A packet via the initial packet stream.
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers that are not target-specific, used for common exceptions.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False
        self.stub_hostname = "localhost"

    def get_next_port(self):
        return 12000 + random.randint(0,3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def create_named_pipe(self):
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode. This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Create the file object for the named pipe. Note that reads on it follow
        # the semantics of the non-blocking read side of a named pipe, which differ
        # from those of a named pipe opened for read in blocking mode.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
        # Wait for something to read with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

    def init_llgs_test(self, use_named_pipe=True):
        if lldb.remote_platform:
            # Remote platforms don't support named pipe based port negotiation
            use_named_pipe = False

            platform = self.dbg.GetSelectedPlatform()

            shell_command = lldb.SBPlatformShellCommand("echo $PPID")
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('echo $PPID') failed: %s" % err)
            pid = shell_command.GetOutput().strip()

            shell_command = lldb.SBPlatformShellCommand("readlink /proc/%s/exe" % pid)
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('readlink /proc/%s/exe') failed: %s" % (pid, err))
            self.debug_monitor_exe = shell_command.GetOutput().strip()
            dname = self.dbg.GetSelectedPlatform().GetWorkingDirectory()
        else:
            self.debug_monitor_exe = get_lldb_server_exe()
            if not self.debug_monitor_exe:
                self.skipTest("lldb-server exe not found")
            dname = os.path.join(os.environ["LLDB_TEST"], os.environ["LLDB_SESSION_DIRNAME"])

        self.debug_monitor_extra_args = ["gdbserver", "-c", "log enable -T -f {}/process-{}.log lldb break process thread".format(dname, self.id()), "-c", "log enable -T -f {}/packets-{}.log gdb-remote packets".format(dname, self.id())]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = ["--log-file=/tmp/packets-{}.log".format(self._testMethodName), "--log-flags=0x800000"]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification
        # when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def forward_adb_port(self, source, target, direction):
        def remove_port_forward():
            subprocess.call(["adb", direction, "--remove", "tcp:%d" % source])

        subprocess.call(["adb", direction, "tcp:%d" % source, "tcp:%d" % target])
        self.addTearDownHook(remove_port_forward)

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-server shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

            try:
                sock.close()
            except:
                logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        triple = self.dbg.GetSelectedPlatform().GetTriple()
        if re.match(".*-.*-.*-android", triple):
            self.forward_adb_port(self.port, self.port, "forward")

        connect_info = (self.stub_hostname, self.port)
        # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1])
        sock.connect(connect_info)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def set_inferior_startup_attach_manually(self):
        self._inferior_startup = self._STARTUP_ATTACH_MANUALLY

    def get_debug_monitor_command_line_args(self, attach_pid=None):
        commandline_args = self.debug_monitor_extra_args + ["localhost:{}".format(self.port)]
        if attach_pid:
            commandline_args += ["--attach=%d" % attach_pid]
        if self.named_pipe_path:
            commandline_args += ["--named-pipe", self.named_pipe_path]
        return commandline_args

    def run_platform_command(self, cmd):
        platform = self.dbg.GetSelectedPlatform()
        shell_command = lldb.SBPlatformShellCommand(cmd)
        err = platform.Run(shell_command)
        return (err, shell_command.GetOutput())

    def launch_debug_monitor(self, attach_pid=None, logfile=None):
        # Create the command line.
        commandline_args = self.get_debug_monitor_command_line_args(attach_pid=attach_pid)

        # Start the server.
        server = self.spawnSubprocess(self.debug_monitor_exe, commandline_args, install_remote=False)
        self.addTearDownHook(self.cleanupSubprocesses)
        self.assertIsNotNone(server)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger

            def shutdown_debug_monitor():
                try:
                    server.terminate()
                except:
                    logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try not to collide with other ports,
        # and retry a max number of times.
Todd Fiala8aae4f42014-06-13 23:34:17 +0000272 attempts = 0
273 MAX_ATTEMPTS = 20
Todd Fialae50b2e42014-06-13 19:11:33 +0000274
Todd Fiala8aae4f42014-06-13 23:34:17 +0000275 while attempts < MAX_ATTEMPTS:
Todd Fiala9e2d3292014-07-09 23:10:43 +0000276 server = self.launch_debug_monitor(attach_pid=attach_pid)
277
Tamas Berghammer04f51d12015-03-11 13:51:07 +0000278 # Schedule debug monitor to be shut down during teardown.
279 logger = self.logger
280 def shutdown_debug_monitor():
Todd Fiala9e2d3292014-07-09 23:10:43 +0000281 try:
Tamas Berghammer04f51d12015-03-11 13:51:07 +0000282 server.terminate()
283 except:
284 logger.warning("failed to terminate server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
285 self.addTearDownHook(shutdown_debug_monitor)
286
287 # Create a socket to talk to the server
288 try:
289 self.sock = self.create_socket()
290 return server
291 except socket.error as serr:
292 # We're only trying to handle connection refused.
293 if serr.errno != errno.ECONNREFUSED:
294 raise serr
295 # We should close the server here to be safe.
296 server.terminate()
Todd Fiala9e2d3292014-07-09 23:10:43 +0000297
298 # Increment attempts.
299 print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
300 attempts += 1
301
302 # And wait a random length of time before next attempt, to avoid collisions.
303 time.sleep(random.randint(1,5))
304
305 # Now grab a new port number.
306 self.port = self.get_next_port()
Todd Fiala8aae4f42014-06-13 23:34:17 +0000307
308 raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)
Todd Fialae50b2e42014-06-13 19:11:33 +0000309
Tamas Berghammer04f51d12015-03-11 13:51:07 +0000310 def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3, exe_path=None):
Todd Fialae50b2e42014-06-13 19:11:33 +0000311 # We're going to start a child process that the debug monitor stub can later attach to.
312 # This process needs to be started so that it just hangs around for a while. We'll
313 # have it sleep.
Todd Fiala58a2f662014-08-12 17:02:07 +0000314 if not exe_path:
315 exe_path = os.path.abspath("a.out")
Todd Fialae50b2e42014-06-13 19:11:33 +0000316
Tamas Berghammer04f51d12015-03-11 13:51:07 +0000317 args = []
Todd Fialae50b2e42014-06-13 19:11:33 +0000318 if inferior_args:
319 args.extend(inferior_args)
320 if sleep_seconds:
321 args.append("sleep:%d" % sleep_seconds)
322
Tamas Berghammer04f51d12015-03-11 13:51:07 +0000323 return self.spawnSubprocess(exe_path, args)
Todd Fialae50b2e42014-06-13 19:11:33 +0000324
Todd Fiala58a2f662014-08-12 17:02:07 +0000325 def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3, inferior_exe_path=None):
Todd Fialae50b2e42014-06-13 19:11:33 +0000326 """Prep the debug monitor, the inferior, and the expected packet stream.
327
328 Handle the separate cases of using the debug monitor in attach-to-inferior mode
329 and in launch-inferior mode.
330
        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach-to-pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream. The packet stream is not yet executed; it is ready to have more
        expected packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are
        added to the expected socket stream. The packet stream is not yet executed;
        it is ready to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds, exe_path=inferior_exe_path)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            if self._inferior_startup == self._STARTUP_ATTACH:
                # In this case, we want the stub to attach via the command line, so set the command line attach pid here.
                attach_pid = inferior.pid

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args
            if not inferior_exe_path:
                inferior_exe_path = os.path.abspath("a.out")

            if lldb.remote_platform:
                remote_work_dir = lldb.remote_platform.GetWorkingDirectory()
                remote_path = os.path.join(remote_work_dir, os.path.basename(inferior_exe_path))
                remote_file_spec = lldb.SBFileSpec(remote_path, False)
                err = lldb.remote_platform.Install(lldb.SBFileSpec(inferior_exe_path, True), remote_file_spec)
                if err.Fail():
                    raise Exception("remote_platform.Install('%s', '%s') failed: %s" % (inferior_exe_path, remote_path, err))
                inferior_exe_path = remote_path

            launch_args = [inferior_exe_path]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        # Build the expected protocol stream
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

    def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds):
        response = ""
        timeout_time = time.time() + timeout_seconds

        while not expected_content_regex.match(response) and time.time() < timeout_time:
            can_read, _, _ = select.select([sock], [], [], timeout_seconds)
            if can_read and sock in can_read:
                recv_bytes = sock.recv(4096)
                if recv_bytes:
                    response += recv_bytes

        self.assertTrue(expected_content_regex.match(response))

    def expect_socket_send(self, sock, content, timeout_seconds):
        request_bytes_remaining = content
        timeout_time = time.time() + timeout_seconds

        while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
            _, can_write, _ = select.select([], [sock], [], timeout_seconds)
            if can_write and sock in can_write:
                written_byte_count = sock.send(request_bytes_remaining)
                request_bytes_remaining = request_bytes_remaining[written_byte_count:]
        self.assertEquals(len(request_bytes_remaining), 0)

    def do_handshake(self, stub_socket, timeout_seconds=5):
        # Write the ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

        # Send the start no ack mode packet.
        NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
        bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
        self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST))

        # Receive the ack and "OK"
        self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)

        # Send the final ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#e4",
             "send packet: $OK#00",
             ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#dc",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "triple",
        "vendor",
        "endian",
        "ptrsize"
        ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
        ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
        """Add a "lldb_register_index" key containing the 0-based index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
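        # A minimal illustration (example text made up for this comment):
        # parsing "start:3fff0000;size:1000;permissions:rwx;" with the loop
        # above yields {"start": "3fff0000", "size": "1000", "permissions": "rwx"}.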
Todd Fialae50b2e42014-06-13 19:11:33 +0000578 return kv_dict

    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#63",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
             ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
             ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
        ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

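        # Illustrative example (response text made up for this comment):
        # "PacketSize=20000;QStartNoAckMode+;qXfer:auxv:read+" would parse to
        # {"PacketSize": "20000", "QStartNoAckMode": "+", "qXfer:auxv:read": "+"}.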
        return supported_dict

    def run_process_then_stop(self, run_seconds=1):
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register. Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register. Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+$", key):
                registers[int(key, 16)] = val
        return registers

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
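        # Worked examples (made up for this comment): "}]" decodes to "}",
        # since chr(ord("]") ^ 0x20) == "}"; and "0* " decodes to "0000",
        # since ord(" ") - 29 == 3 extra copies of the preceding "0".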
        return decoded_bytes

    def build_auxv_dict(self, endian, word_size, auxv_data):
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} responses until a bare 'l' or an 'l' with data is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done. We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
            "read packet: {}".format(chr(03)),
            # And wait for the stop notification.
            {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
            ], True)

    def parse_interrupt_packets(self, context):
        self.assertIsNotNone(context.get("stop_signo"))
        self.assertIsNotNone(context.get("stop_key_val_text"))
        return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))

    def add_QSaveRegisterState_packets(self, thread_id):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)

    def parse_QSaveRegisterState_response(self, context):
        self.assertIsNotNone(context)

        save_response = context.get("save_response")
        self.assertIsNotNone(save_response)

        if len(save_response) < 1 or save_response[0] == "E":
            # error received
            return (False, None)
        else:
            return (True, int(save_response))

    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)

    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info. We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when we load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#49",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

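        # Illustrative example (response made up for this comment): a captured
        # vCont_query_response of ";c;C;s;S" maps to {"c": 1, "C": 1, "s": 1, "S": 1}.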
        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true. If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#63",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
             { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
               "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
             # Now stop the inferior.
             "read packet: {}".format(chr(03)),
             # And wait for the stop notification.
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Grab the main thread id.
        self.assertIsNotNone(context.get("stop_thread_id"))
        main_thread_id = int(context.get("stop_thread_id"), 16)

        # Grab the function address.
        self.assertIsNotNone(context.get("function_address"))
        function_address = int(context.get("function_address"), 16)

        # Grab the data addresses.
        self.assertIsNotNone(context.get("g_c1_address"))
        g_c1_address = int(context.get("g_c1_address"), 16)

        self.assertIsNotNone(context.get("g_c2_address"))
        g_c2_address = int(context.get("g_c2_address"), 16)

        # Set a breakpoint at the given address.
        # Note this might need to be switched per platform (ARM, mips, etc.).
        BREAKPOINT_KIND = 1
        self.reset_test_sequence()
        self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Remove the breakpoint.
        self.reset_test_sequence()
        self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify g_c1 and g_c2 match expected initial state.
        args = {}
        args["g_c1_address"] = g_c1_address
        args["g_c2_address"] = g_c2_address
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"

        self.assertTrue(self.g_c1_c2_contents_are(args))

        # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)

        # Verify we hit the next state.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)