"""
Base class for gdb-remote test cases.
"""

import errno
import os
import os.path
import platform
import random
import re
import select
import sets
import signal
import socket
import subprocess
import sys
import tempfile
import time
import unittest2
from lldbtest import *
from lldbgdbserverutils import *
import logging

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Start the inferior separately, attach to the inferior on the stub command line.
    _STARTUP_ATTACH = "attach"
    # Start the inferior separately, start the stub without attaching, allow the test to attach to the inferior however it wants (e.g. $vAttach;pid).
    _STARTUP_ATTACH_MANUALLY = "attach_manually"
    # Start the stub, and launch the inferior with an $A packet via the initial packet stream.
    _STARTUP_LAUNCH = "launch"

    # GDB-remote signal numbers, not target-specific, used for common exceptions.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None
        self.named_pipe = None
        self.named_pipe_fd = None
        self.stub_sends_two_stop_notifications_on_kill = False
        if lldb.platform_url:
            self.stub_hostname = re.match(".*://(.*):[0-9]+", lldb.platform_url).group(1)
        else:
            self.stub_hostname = "localhost"

    def get_next_port(self):
        return 12000 + random.randint(0,3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def create_named_pipe(self):
        # Create a temp dir and name for a pipe.
        temp_dir = tempfile.mkdtemp()
        named_pipe_path = os.path.join(temp_dir, "stub_port_number")

        # Create the named pipe.
        os.mkfifo(named_pipe_path)

        # Open the read side of the pipe in non-blocking mode. This will return right away, ready or not.
        named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)

        # Create a file object for the named pipe. Because the underlying descriptor
        # was opened non-blocking, reads on this file object return immediately with
        # whatever data is available (possibly none) instead of blocking for a writer.
        named_pipe = os.fdopen(named_pipe_fd, "r")
        self.assertIsNotNone(named_pipe)

        def shutdown_named_pipe():
            # Close the pipe.
            try:
                named_pipe.close()
            except:
                print "failed to close named pipe"
                None

            # Delete the pipe.
            try:
                os.remove(named_pipe_path)
            except:
                print "failed to delete named pipe: {}".format(named_pipe_path)
                None

            # Delete the temp directory.
            try:
                os.rmdir(temp_dir)
            except:
                print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir))
                None

        # Add the shutdown hook to clean up the named pipe.
        self.addTearDownHook(shutdown_named_pipe)

        # Clear the port so the stub selects a port number.
        self.port = 0

        return (named_pipe_path, named_pipe, named_pipe_fd)

    def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
        # Wait for something to read with a max timeout.
        (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds)
        self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.")
        self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.")

        # Read the port from the named pipe.
        stub_port_raw = self.named_pipe.read()
        self.assertIsNotNone(stub_port_raw)
        self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe")

        # Trim null byte, convert to int.
        stub_port_raw = stub_port_raw[:-1]
        stub_port = int(stub_port_raw)
        self.assertTrue(stub_port > 0)

        return stub_port

    def init_llgs_test(self, use_named_pipe=True):
        if lldb.remote_platform:
            # Remote platforms don't support named pipe based port negotiation
            use_named_pipe = False

            platform = self.dbg.GetSelectedPlatform()

            shell_command = lldb.SBPlatformShellCommand("echo $PPID")
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('echo $PPID') failed: %s" % err)
            pid = shell_command.GetOutput().strip()

            shell_command = lldb.SBPlatformShellCommand("readlink /proc/%s/exe" % pid)
            err = platform.Run(shell_command)
            if err.Fail():
                raise Exception("remote_platform.RunShellCommand('readlink /proc/%s/exe') failed: %s" % (pid, err))
            self.debug_monitor_exe = shell_command.GetOutput().strip()
            dname = self.dbg.GetSelectedPlatform().GetWorkingDirectory()
        else:
            self.debug_monitor_exe = get_lldb_server_exe()
            if not self.debug_monitor_exe:
                self.skipTest("lldb-server exe not found")
            dname = os.path.join(os.environ["LLDB_TEST"], os.environ["LLDB_SESSION_DIRNAME"])

        self.debug_monitor_extra_args = ["gdbserver", "-c", "log enable -T -f {}/process-{}.log lldb break process thread".format(dname, self.id()), "-c", "log enable -T -f {}/packets-{}.log gdb-remote packets".format(dname, self.id())]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()

    def init_debugserver_test(self, use_named_pipe=True):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = ["--log-file=/tmp/packets-{}.log".format(self._testMethodName), "--log-flags=0x800000"]
        if use_named_pipe:
            (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe()
        # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification
        # when the process truly dies.
        self.stub_sends_two_stop_notifications_on_kill = True

    def forward_adb_port(self, source, target, direction):
        def remove_port_forward():
            subprocess.call(["adb", direction, "--remove", "tcp:%d" % source])

        subprocess.call(["adb", direction, "tcp:%d" % source, "tcp:%d" % target])
        self.addTearDownHook(remove_port_forward)

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        triple = self.dbg.GetSelectedPlatform().GetTriple()
        if re.match(".*-.*-.*-android", triple):
            self.forward_adb_port(self.port, self.port, "forward")

        connect_info = (self.stub_hostname, self.port)
        sock.connect(connect_info)

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-server shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def set_inferior_startup_attach_manually(self):
        self._inferior_startup = self._STARTUP_ATTACH_MANUALLY

    def get_debug_monitor_command_line_args(self, attach_pid=None):
        if lldb.remote_platform:
            commandline_args = self.debug_monitor_extra_args + ["*:{}".format(self.port)]
        else:
            commandline_args = self.debug_monitor_extra_args + ["localhost:{}".format(self.port)]

        if attach_pid:
            commandline_args += ["--attach=%d" % attach_pid]
        if self.named_pipe_path:
            commandline_args += ["--named-pipe", self.named_pipe_path]
        return commandline_args
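
    # Illustrative example (made-up values, not from a real run): for a local,
    # non-remote session with attach_pid=1234 and no named pipe, the returned list
    # is roughly self.debug_monitor_extra_args + ["localhost:12345", "--attach=1234"].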

    def run_platform_command(self, cmd):
        platform = self.dbg.GetSelectedPlatform()
        shell_command = lldb.SBPlatformShellCommand(cmd)
        err = platform.Run(shell_command)
        return (err, shell_command.GetOutput())

    def launch_debug_monitor(self, attach_pid=None, logfile=None):
        # Create the command line.
        commandline_args = self.get_debug_monitor_command_line_args(attach_pid=attach_pid)

        # Start the server.
        server = self.spawnSubprocess(self.debug_monitor_exe, commandline_args, install_remote=False)
        self.addTearDownHook(self.cleanupSubprocesses)
        self.assertIsNotNone(server)

        # If we're receiving the stub's listening port from the named pipe, do that here.
        if self.named_pipe:
            self.port = self.get_stub_port_from_named_socket()

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        if self.named_pipe:
            # Create the stub.
            server = self.launch_debug_monitor(attach_pid=attach_pid)
            self.assertIsNotNone(server)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger

            def shutdown_debug_monitor():
                try:
                    server.terminate()
                except:
                    logger.warning("failed to terminate server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            # Attach to the stub and return a socket opened to it.
            self.sock = self.create_socket()
            return server

        # We're using a random port algorithm to try not to collide with other ports,
        # and retry a max # times.
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Schedule debug monitor to be shut down during teardown.
            logger = self.logger
            def shutdown_debug_monitor():
                try:
                    server.terminate()
                except:
                    logger.warning("failed to terminate server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
            self.addTearDownHook(shutdown_debug_monitor)

            connect_attempts = 0
            MAX_CONNECT_ATTEMPTS = 10

            while connect_attempts < MAX_CONNECT_ATTEMPTS:
                # Create a socket to talk to the server
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    time.sleep(0.5)
                    connect_attempts += 1

            # We should close the server here to be safe.
            server.terminate()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before next attempt, to avoid collisions.
            time.sleep(random.randint(1,5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3, exe_path=None):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while. We'll
        # have it sleep.
        if not exe_path:
            exe_path = os.path.abspath("a.out")

        args = []
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        inferior = self.spawnSubprocess(exe_path, args)
        def shutdown_process_for_attach():
            try:
                inferior.terminate()
            except:
                self.logger.warning("failed to terminate inferior process for attach: {}; ignoring".format(sys.exc_info()[0]))
        self.addTearDownHook(shutdown_process_for_attach)
        return inferior

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3, inferior_exe_path=None):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach to pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream. The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
        to the expected socket stream. The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds, exe_path=inferior_exe_path)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            if self._inferior_startup == self._STARTUP_ATTACH:
                # In this case, we want the stub to attach via the command line, so set the command line attach pid here.
                attach_pid = inferior.pid

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args
            if not inferior_exe_path:
                inferior_exe_path = os.path.abspath("a.out")

            if lldb.remote_platform:
                remote_work_dir = lldb.remote_platform.GetWorkingDirectory()
                remote_path = os.path.join(remote_work_dir, os.path.basename(inferior_exe_path))
                remote_file_spec = lldb.SBFileSpec(remote_path, False)
                err = lldb.remote_platform.Install(lldb.SBFileSpec(inferior_exe_path, True), remote_file_spec)
                if err.Fail():
                    raise Exception("remote_platform.Install('%s', '%s') failed: %s" % (inferior_exe_path, remote_path, err))
                inferior_exe_path = remote_path

            launch_args = [inferior_exe_path]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        # Build the expected protocol stream
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

    def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds):
        response = ""
        timeout_time = time.time() + timeout_seconds

        while not expected_content_regex.match(response) and time.time() < timeout_time:
            can_read, _, _ = select.select([sock], [], [], timeout_seconds)
            if can_read and sock in can_read:
                recv_bytes = sock.recv(4096)
                if recv_bytes:
                    response += recv_bytes

        self.assertTrue(expected_content_regex.match(response))

    def expect_socket_send(self, sock, content, timeout_seconds):
        request_bytes_remaining = content
        timeout_time = time.time() + timeout_seconds

        while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
            _, can_write, _ = select.select([], [sock], [], timeout_seconds)
            if can_write and sock in can_write:
                written_byte_count = sock.send(request_bytes_remaining)
                request_bytes_remaining = request_bytes_remaining[written_byte_count:]
        self.assertEquals(len(request_bytes_remaining), 0)

    def do_handshake(self, stub_socket, timeout_seconds=5):
        # Write the ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)

        # Send the start no ack mode packet.
        NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
        bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
        self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST))

        # Receive the ack and "OK"
        self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)

        # Send the final ack.
        self.expect_socket_send(stub_socket, "+", timeout_seconds)
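
    # A note on the checksums used in the handshake above and in the canned packets below:
    # a gdb-remote packet is "$<payload>#<checksum>", where the checksum is the modulo-256
    # sum of the payload bytes written as two hex digits. For example, "OK" checksums to
    # 0x4f + 0x4b = 0x9a ("$OK#9a") and "QStartNoAckMode" checksums to 0xb0
    # ("$QStartNoAckMode#b0"). Many packets in this file carry a placeholder "#00"
    # checksum, which the stubs evidently accept once no-ack mode has been negotiated.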

    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#e4",
             "send packet: $OK#00",
             ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#dc",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "triple",
        "vendor",
        "endian",
        "ptrsize"
        ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict
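
    # Illustrative example (made-up values): a raw qProcessInfo payload such as
    #   "pid:1a2b;parent-pid:1;ostype:linux;endian:little;ptrsize:8;"
    # parses to
    #   {"pid": "1a2b", "parent-pid": "1", "ostype": "linux", "endian": "little", "ptrsize": "8"}
    # with all values kept as strings.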

    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
        ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
563 """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict
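
    # Illustrative example (made-up values): a $qMemoryRegionInfo response payload of
    #   "start:400000;size:1000;permissions:rx;"
    # yields {"start": "400000", "size": "1000", "permissions": "rx"}; start and size stay
    # hex strings, and assert_address_within_memory_region() below converts them with int(x, 16).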

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids
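
    # Illustrative example (made-up values, and assuming parse_threadinfo_response() in
    # lldbgdbserverutils handles the usual "m<hex-tid>[,<hex-tid>...]" / "l" reply form):
    # responses of ["m1a2b,1a2c", "l"] would yield thread ids [0x1a2b, 0x1a2c],
    # i.e. [6699, 6700] as decimal ints.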

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#63",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)
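
    # Illustrative example (made-up address): add_set_breakpoint_packets(0x400123, breakpoint_kind=1)
    # queues the literal packet text "read packet: $Z0,400123,1#00", and
    # add_remove_breakpoint_packets() below queues the matching "$z0,400123,1#00".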

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
             ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
             ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
        ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict
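
    # Illustrative example (made-up reply): a qSupported response payload of
    #   "PacketSize=20000;QStartNoAckMode+;qXfer:auxv:read+"
    # parses to
    #   {"PacketSize": "20000", "QStartNoAckMode": "+", "qXfer:auxv:read": "+"}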

    def run_process_then_stop(self, run_seconds=1):
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register. Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register. Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+$", key):
                registers[int(key, 16)] = val
        return registers
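
    # Illustrative example (made-up values): for stop-packet key/values such as
    #   "thread:1a2b;name:main;00:0011223344556677;"
    # only the hex-numbered keys are treated as register numbers, giving
    #   {0: "0011223344556677"}
    # with the register value left as the raw hex string the stub sent.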

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes
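
    # Illustrative examples of the two encodings handled above (made-up input):
    #   escape:     "}]" decodes to "}"  (0x5d ^ 0x20 == 0x7d)
    #   run-length: "0* " decodes to "0000"  (' ' is 0x20, so 0x20 - 29 == 3 extra copies
    #               of the preceding character are appended)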

    def build_auxv_dict(self, endian, word_size, auxv_data):
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict
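
    # Illustrative example (made-up data, and assuming unpack_endian_binary_string() in
    # lldbgdbserverutils decodes raw bytes as a target-endian unsigned integer): with
    # endian="little" and word_size=8, an auxv blob holding the 8-byte key 6 and the
    # 8-byte value 0x1000, followed by the terminating double-zero entry, yields {6: 4096}.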

    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
868 """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done. We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
904 "read packet: {}".format(chr(03)),
905 # And wait for the stop notification.
906 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
907 ], True)
908
909 def parse_interrupt_packets(self, context):
910 self.assertIsNotNone(context.get("stop_signo"))
911 self.assertIsNotNone(context.get("stop_key_val_text"))
Todd Fiala9846d452014-06-20 17:39:24 +0000912 return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))
913
914 def add_QSaveRegisterState_packets(self, thread_id):
915 if thread_id:
916 # Use the thread suffix form.
917 request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
918 else:
919 request = "read packet: $QSaveRegisterState#00"
920
921 self.test_sequence.add_log_lines([
922 request,
923 {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
924 ], True)
925
926 def parse_QSaveRegisterState_response(self, context):
927 self.assertIsNotNone(context)
928
929 save_response = context.get("save_response")
930 self.assertIsNotNone(save_response)
931
932 if len(save_response) < 1 or save_response[0] == "E":
933 # error received
934 return (False, None)
935 else:
936 return (True, int(save_response))
937
938 def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
939 if thread_id:
940 # Use the thread suffix form.
941 request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
942 else:
943 request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)
944
945 self.test_sequence.add_log_lines([
946 request,
947 "send packet: $OK#00"
948 ], True)
949
950 def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
951 self.assertIsNotNone(reg_infos)
952
953 successful_writes = 0
954 failed_writes = 0
955
956 for reg_info in reg_infos:
957 # Use the lldb register index added to the reg info. We're not necessarily
958 # working off a full set of register infos, so an inferred register index could be wrong.
959 reg_index = reg_info["lldb_register_index"]
960 self.assertIsNotNone(reg_index)
961
962 reg_byte_size = int(reg_info["bitsize"])/8
963 self.assertTrue(reg_byte_size > 0)
964
965 # Handle thread suffix.
966 if thread_id:
967 p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
968 else:
969 p_request = "read packet: $p{:x}#00".format(reg_index)
970
971 # Read the existing value.
972 self.reset_test_sequence()
973 self.test_sequence.add_log_lines([
974 p_request,
975 { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
976 ], True)
977 context = self.expect_gdbremote_sequence()
978 self.assertIsNotNone(context)
979
980 # Verify the response length.
981 p_response = context.get("p_response")
982 self.assertIsNotNone(p_response)
983 initial_reg_value = unpack_register_hex_unsigned(endian, p_response)
984
985 # Flip the value by xoring with all 1s
986 all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
987 flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
988 # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)
989
990 # Handle thread suffix for P.
991 if thread_id:
992 P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
993 else:
994 P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))
995
996 # Write the flipped value to the register.
997 self.reset_test_sequence()
998 self.test_sequence.add_log_lines([
999 P_request,
1000 { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
1001 ], True)
1002 context = self.expect_gdbremote_sequence()
1003 self.assertIsNotNone(context)
1004
1005 # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail
1006 # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them
1007 # all flipping perfectly.
1008 P_response = context.get("P_response")
1009 self.assertIsNotNone(P_response)
1010 if P_response == "OK":
1011 successful_writes += 1
1012 else:
1013 failed_writes += 1
1014 # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)
1015
1016 # Read back the register value, ensure it matches the flipped value.
1017 if P_response == "OK":
1018 self.reset_test_sequence()
1019 self.test_sequence.add_log_lines([
1020 p_request,
1021 { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
1022 ], True)
1023 context = self.expect_gdbremote_sequence()
1024 self.assertIsNotNone(context)
1025
1026 verify_p_response_raw = context.get("p_response")
1027 self.assertIsNotNone(verify_p_response_raw)
1028 verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)
1029
1030 if verify_bits != flipped_bits_int:
1031 # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts.
1032 # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
1033 successful_writes -= 1
1034 failed_writes +=1
1035
1036 return (successful_writes, failed_writes)
1037
1038 def is_bit_flippable_register(self, reg_info):
1039 if not reg_info:
1040 return False
1041 if not "set" in reg_info:
1042 return False
1043 if reg_info["set"] != "General Purpose Registers":
1044 return False
1045 if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
1046 # Don't try to bit flip registers contained in another register.
1047 return False
1048 if re.match("^.s$", reg_info["name"]):
1049 # This is a 2-letter register name that ends in "s", like a segment register.
1050 # Don't try to bit flip these.
1051 return False
1052 # Okay, this looks fine-enough.
1053 return True
1054
1055 def read_register_values(self, reg_infos, endian, thread_id=None):
1056 self.assertIsNotNone(reg_infos)
1057 values = {}
1058
1059 for reg_info in reg_infos:
            # We append a register index when we load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#49",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}
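
    # Illustrative example (made-up reply): a stub response whose payload is "vCont;c;C;s;S"
    # yields {"c": 1, "C": 1, "s": 1, "S": 1}; an empty or missing capture yields {}.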

    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true. If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#63",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
1183 { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
1184 "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
1185 # Now stop the inferior.
1186 "read packet: {}".format(chr(03)),
1187 # And wait for the stop notification.
1188 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
1189 True)
1190
1191 # Run the packet stream.
1192 context = self.expect_gdbremote_sequence()
1193 self.assertIsNotNone(context)
1194
1195 # Grab the main thread id.
1196 self.assertIsNotNone(context.get("stop_thread_id"))
1197 main_thread_id = int(context.get("stop_thread_id"), 16)
1198
1199 # Grab the function address.
1200 self.assertIsNotNone(context.get("function_address"))
1201 function_address = int(context.get("function_address"), 16)
1202
1203 # Grab the data addresses.
1204 self.assertIsNotNone(context.get("g_c1_address"))
1205 g_c1_address = int(context.get("g_c1_address"), 16)
1206
1207 self.assertIsNotNone(context.get("g_c2_address"))
1208 g_c2_address = int(context.get("g_c2_address"), 16)
1209
1210 # Set a breakpoint at the given address.
1211 # Note this might need to be switched per platform (ARM, mips, etc.).
1212 BREAKPOINT_KIND = 1
1213 self.reset_test_sequence()
1214 self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
1215 context = self.expect_gdbremote_sequence()
1216 self.assertIsNotNone(context)
1217
1218 # Remove the breakpoint.
1219 self.reset_test_sequence()
1220 self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
1221 context = self.expect_gdbremote_sequence()
1222 self.assertIsNotNone(context)
1223
1224 # Verify g_c1 and g_c2 match expected initial state.
1225 args = {}
1226 args["g_c1_address"] = g_c1_address
1227 args["g_c2_address"] = g_c2_address
1228 args["expected_g_c1"] = "0"
1229 args["expected_g_c2"] = "1"
1230
1231 self.assertTrue(self.g_c1_c2_contents_are(args))
1232
1233 # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code.
1234 args["expected_g_c1"] = "1"
1235 args["expected_g_c2"] = "1"
1236 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1237 self.assertTrue(state_reached)
1238
1239 # Verify we hit the next state.
1240 args["expected_g_c1"] = "1"
1241 args["expected_g_c2"] = "0"
1242 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1243 self.assertTrue(state_reached)
1244 self.assertEquals(step_count, 1)
1245
1246 # Verify we hit the next state.
1247 args["expected_g_c1"] = "0"
1248 args["expected_g_c2"] = "0"
1249 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1250 self.assertTrue(state_reached)
1251 self.assertEquals(step_count, 1)
1252
1253 # Verify we hit the next state.
1254 args["expected_g_c1"] = "0"
1255 args["expected_g_c2"] = "1"
1256 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1257 self.assertTrue(state_reached)
1258 self.assertEquals(step_count, 1)
Todd Fialaaf245d12014-06-30 21:05:18 +00001259