"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

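    # GDB remote packets are framed as $<payload>#<checksum>, where the two
    # checksum digits are the modulo-256 sum of the payload bytes, in hex.
    # For the kill packet the payload is just "k", and ord('k') == 0x6b.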
    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Start the inferior separately, attach to the inferior on the stub command line.
    _STARTUP_ATTACH = "attach"
    # Start the inferior separately, start the stub without attaching, allow the test to attach to the inferior however it wants (e.g. $vAttach;pid).
    _STARTUP_ATTACH_MANUALLY = "attach_manually"
    # Start the stub, and launch the inferior with an $A packet via the initial packet stream.
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers used for common exceptions; these are not target-specific.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False
        if lldb.platform_url:
            scheme, host = re.match('(.+)://(.+):\d+', lldb.platform_url).groups()
            self.stub_hostname = 'localhost' if scheme == 'adb' else host
        else:
            self.stub_hostname = "localhost"

    def get_next_port(self):
        return 12000 + random.randint(0,3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def create_named_pipe(self):
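        # The named pipe is used for port negotiation with the stub: the pipe
        # path is handed to the stub on its command line (--named-pipe), and the
        # stub writes back the TCP port it is actually listening on, terminated
        # by a null byte (see get_stub_port_from_named_socket below).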
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode.  This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Create the file for the named pipe.  Note this will follow semantics of
        # a non-blocking read side of a named pipe, which has different semantics
        # than a named pipe opened for read in non-blocking mode.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"
                None

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)
                None

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))
                None

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
        # Wait for something to read with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

    def init_llgs_test(self, use_named_pipe=True):
        if lldb.remote_platform:
            # Remote platforms don't support named pipe based port negotiation
            use_named_pipe = False

            platform = self.dbg.GetSelectedPlatform()

            shell_command = lldb.SBPlatformShellCommand("echo $PPID")
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('echo $PPID') failed: %s" % err)
            pid = shell_command.GetOutput().strip()

            shell_command = lldb.SBPlatformShellCommand("readlink /proc/%s/exe" % pid)
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('readlink /proc/%s/exe') failed: %s" % (pid, err))
            self.debug_monitor_exe = shell_command.GetOutput().strip()
            dname = self.dbg.GetSelectedPlatform().GetWorkingDirectory()
        else:
            self.debug_monitor_exe = get_lldb_server_exe()
            if not self.debug_monitor_exe:
                self.skipTest("lldb-server exe not found")
            dname = os.path.join(os.environ["LLDB_TEST"], os.environ["LLDB_SESSION_DIRNAME"])

        self.debug_monitor_extra_args = ["gdbserver", "-c", "log enable -T -f {}/process-{}.log lldb break process thread".format(dname, self.id()), "-c", "log enable -T -f {}/packets-{}.log gdb-remote packets".format(dname, self.id())]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = ["--log-file=/tmp/packets-{}.log".format(self._testMethodName), "--log-flags=0x800000"]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification
        # when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def forward_adb_port(self, source, target, direction):
        def remove_port_forward():
            subprocess.call(["adb", direction, "--remove", "tcp:%d" % source])

        subprocess.call(["adb", direction, "tcp:%d" % source, "tcp:%d" % target])
        self.addTearDownHook(remove_port_forward)

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        triple = self.dbg.GetSelectedPlatform().GetTriple()
        if re.match(".*-.*-.*-android", triple):
            self.forward_adb_port(self.port, self.port, "forward")

        connect_info = (self.stub_hostname, self.port)
        sock.connect(connect_info)

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-server shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def set_inferior_startup_attach_manually(self):
        self._inferior_startup = self._STARTUP_ATTACH_MANUALLY

    def get_debug_monitor_command_line_args(self, attach_pid=None):
        if lldb.remote_platform:
            commandline_args = self.debug_monitor_extra_args + ["*:{}".format(self.port)]
        else:
            commandline_args = self.debug_monitor_extra_args + ["localhost:{}".format(self.port)]

        if attach_pid:
            commandline_args += ["--attach=%d" % attach_pid]
        if self.named_pipe_path:
            commandline_args += ["--named-pipe", self.named_pipe_path]
        return commandline_args

    def run_platform_command(self, cmd):
        platform = self.dbg.GetSelectedPlatform()
        shell_command = lldb.SBPlatformShellCommand(cmd)
        err = platform.Run(shell_command)
        return (err, shell_command.GetOutput())

    def launch_debug_monitor(self, attach_pid=None, logfile=None):
        # Create the command line.
        commandline_args = self.get_debug_monitor_command_line_args(attach_pid=attach_pid)

        # Start the server.
        server = self.spawnSubprocess(self.debug_monitor_exe, commandline_args, install_remote=False)
        self.addTearDownHook(self.cleanupSubprocesses)
        self.assertIsNotNone(server)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.terminate()
                except:
                    logger.warning("failed to terminate server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try to avoid colliding with other ports,
        # and retry a max number of times.
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.terminate()
                except:
                    logger.warning("failed to terminate server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            connect_attempts = 0
            MAX_CONNECT_ATTEMPTS = 10

            while connect_attempts < MAX_CONNECT_ATTEMPTS:
                # Create a socket to talk to the server
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    time.sleep(0.5)
                    connect_attempts += 1

            # We should close the server here to be safe.
            server.terminate()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before next attempt, to avoid collisions.
            time.sleep(random.randint(1,5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3, exe_path=None):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while.  We'll
        # have it sleep.
        if not exe_path:
            exe_path = os.path.abspath("a.out")

        args = []
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        inferior = self.spawnSubprocess(exe_path, args)
        logger = self.logger
        def shutdown_process_for_attach():
            try:
                inferior.terminate()
            except:
                logger.warning("failed to terminate inferior process for attach: {}; ignoring".format(sys.exc_info()[0]))
        self.addTearDownHook(shutdown_process_for_attach)
        return inferior

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3, inferior_exe_path=None):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach to pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream.  The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no ack mode is
        setup on the expected packet stream, then the verified launch packets are added
        to the expected socket stream.  The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds, exe_path=inferior_exe_path)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            if self._inferior_startup == self._STARTUP_ATTACH:
                # In this case, we want the stub to attach via the command line, so set the command line attach pid here.
                attach_pid = inferior.pid

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args
            if not inferior_exe_path:
                inferior_exe_path = os.path.abspath("a.out")

            if lldb.remote_platform:
                remote_work_dir = lldb.remote_platform.GetWorkingDirectory()
                remote_path = os.path.join(remote_work_dir, os.path.basename(inferior_exe_path))
                remote_file_spec = lldb.SBFileSpec(remote_path, False)
                err = lldb.remote_platform.Install(lldb.SBFileSpec(inferior_exe_path, True), remote_file_spec)
                if err.Fail():
                    raise Exception("remote_platform.Install('%s', '%s') failed: %s" % (inferior_exe_path, remote_path, err))
                inferior_exe_path = remote_path

            launch_args = [inferior_exe_path]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        # Build the expected protocol stream
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

    def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds):
        response = ""
        timeout_time = time.time() + timeout_seconds

        while not expected_content_regex.match(response) and time.time() < timeout_time:
            can_read, _, _ = select.select([sock], [], [], timeout_seconds)
            if can_read and sock in can_read:
                recv_bytes = sock.recv(4096)
                if recv_bytes:
                    response += recv_bytes

        self.assertTrue(expected_content_regex.match(response))

    def expect_socket_send(self, sock, content, timeout_seconds):
        request_bytes_remaining = content
        timeout_time = time.time() + timeout_seconds

        while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
            _, can_write, _ = select.select([], [sock], [], timeout_seconds)
            if can_write and sock in can_write:
                written_byte_count = sock.send(request_bytes_remaining)
                request_bytes_remaining = request_bytes_remaining[written_byte_count:]
        self.assertEquals(len(request_bytes_remaining), 0)

    def do_handshake(self, stub_socket, timeout_seconds=5):
        # Write the ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

        # Send the start no ack mode packet.
        NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
        bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
        self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST))

        # Receive the ack and "OK"
        self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)

        # Send the final ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#e4",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#dc",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "triple",
        "vendor",
        "endian",
        "ptrsize"
    ]

    def parse_process_info_response(self, context):
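        # The raw response is a flat "key:value;" list, e.g. (illustrative only):
        #   pid:1a2b;parent-pid:1;ostype:linux;endian:little;ptrsize:8;...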
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
        """Add a "lldb_register_index" key containing the 0-based index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
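        # Turn "key1:val1;key2:val2;" text into a dict, e.g. (illustrative only):
        #   parse_key_val_dict("start:1000;size:2000;")
        #     => {"start": "1000", "size": "2000"}
        # When allow_dupes is True, repeated keys are collected into a list.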
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
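        # Z0 requests a software breakpoint; the kind field is target-specific,
        # typically the breakpoint instruction length in bytes (see the
        # per-platform note on BREAKPOINT_KIND in
        # single_step_only_steps_one_instruction below).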
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#63",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
             ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
             ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected.  For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
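        # For example (illustrative only), "PacketSize=20000;QStartNoAckMode+"
        # parses to {"PacketSize": "20000", "QStartNoAckMode": "+"}.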
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

743 def run_process_then_stop(self, run_seconds=1):
744 # Tell the stub to continue.
745 self.test_sequence.add_log_lines(
Stephane Sezerb6e81922014-11-20 18:50:46 +0000746 ["read packet: $vCont;c#a8"],
Todd Fialae50b2e42014-06-13 19:11:33 +0000747 True)
748 context = self.expect_gdbremote_sequence()
749
750 # Wait for run_seconds.
751 time.sleep(run_seconds)
752
753 # Send an interrupt, capture a T response.
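        # The raw 0x03 byte (chr(03)) is the gdb-remote interrupt character
        # (the equivalent of ^C); it is sent outside the normal $...#xx framing.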
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register.  Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register.  Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+$", key):
                registers[int(key, 16)] = val
        return registers

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
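        # Binary payloads escape '#', '$', '}' (and '*' where it could be taken
        # as run-length encoding) as '}' followed by the original byte XOR 0x20,
        # and use '*' for run-length encoding: the byte after '*' minus 29 gives
        # the number of extra repeats of the preceding character.  For example
        # (illustrative only), "}]" decodes to "}" and "0*&" decodes to ten '0's.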
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

    def build_auxv_dict(self, endian, word_size, auxv_data):
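        # auxv data is a flat sequence of (key, value) pairs of target machine
        # words in target byte order, terminated by an entry whose key is 0
        # (AT_NULL); the loop below unpacks that into a python dict.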
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' response (possibly with data) is returned."""
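        # Each chunk reply is either $m<data>#xx (more data follows) or
        # $l<data>#xx (this is the last chunk), following the qXfer-style
        # chunked-read convention.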
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done.  We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
            "read packet: {}".format(chr(03)),
            # And wait for the stop notification.
            {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
            ], True)

    def parse_interrupt_packets(self, context):
        self.assertIsNotNone(context.get("stop_signo"))
        self.assertIsNotNone(context.get("stop_key_val_text"))
        return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))

    def add_QSaveRegisterState_packets(self, thread_id):
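        # QSaveRegisterState asks the stub to snapshot the register state of the
        # (optionally thread-suffixed) thread; a successful reply is a numeric
        # save id that can later be passed to QRestoreRegisterState.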
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)

    def parse_QSaveRegisterState_response(self, context):
        self.assertIsNotNone(context)

        save_response = context.get("save_response")
        self.assertIsNotNone(save_response)

        if len(save_response) < 1 or save_response[0] == "E":
            # error received
            return (False, None)
        else:
            return (True, int(save_response))

    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)

    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info.  We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded.  There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions.  Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written.  Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine-enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when loading reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#49",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction.  We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true.  If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#63",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
             { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
               "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
             # Now stop the inferior.
             "read packet: {}".format(chr(03)),
             # And wait for the stop notification.
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Grab the main thread id.
        self.assertIsNotNone(context.get("stop_thread_id"))
        main_thread_id = int(context.get("stop_thread_id"), 16)

        # Grab the function address.
        self.assertIsNotNone(context.get("function_address"))
        function_address = int(context.get("function_address"), 16)

        # Grab the data addresses.
        self.assertIsNotNone(context.get("g_c1_address"))
        g_c1_address = int(context.get("g_c1_address"), 16)

        self.assertIsNotNone(context.get("g_c2_address"))
        g_c2_address = int(context.get("g_c2_address"), 16)

        # Set a breakpoint at the given address.
        # Note this might need to be switched per platform (ARM, mips, etc.).
        BREAKPOINT_KIND = 1
        self.reset_test_sequence()
        self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Remove the breakpoint.
        self.reset_test_sequence()
        self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify g_c1 and g_c2 match expected initial state.
        args = {}
        args["g_c1_address"] = g_c1_address
        args["g_c2_address"] = g_c2_address
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"

        self.assertTrue(self.g_c1_c2_contents_are(args))

        # Verify we take only a small number of steps to hit the first state.  Might need to work through function entry prologue code.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)

        # Verify we hit the next state.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)