Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 1 | """ |
| 2 | Base class for gdb-remote test cases. |
| 3 | """ |
| 4 | |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 5 | import errno |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 6 | import unittest2 |
| 7 | import pexpect |
| 8 | import platform |
| 9 | import sets |
| 10 | import signal |
| 11 | import socket |
| 12 | import subprocess |
| 13 | import sys |
| 14 | import time |
| 15 | from lldbtest import * |
| 16 | from lldbgdbserverutils import * |
| 17 | import logging |
| 18 | import os.path |
| 19 | |
class GdbRemoteTestCaseBase(TestBase):
    """Base class for tests that drive a gdb-remote protocol stub
    (debugserver or lldb-gdbserver) directly over a socket."""

    mydir = TestBase.compute_mydir(__file__)

    # TCP port the debug monitor stub listens on; tests connect here.
    port = 12345

    # Default replay timeout (seconds) used by expect_gdbremote_sequence().
    _TIMEOUT_SECONDS = 5

    # Pre-computed "$k" (kill) packet, including its two-hex-digit checksum.
    _GDBREMOTE_KILL_PACKET = "$k#6b"

    # Logging level for self.logger; switch to DEBUG when diagnosing a test.
    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Inferior startup modes: attach to a running process, or have the stub launch one.
    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB Signal numbers that are not target-specific used for common exceptions
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96
| 43 | |
| 44 | def setUp(self): |
| 45 | TestBase.setUp(self) |
| 46 | FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s' |
| 47 | logging.basicConfig(format=FORMAT) |
| 48 | self.logger = logging.getLogger(__name__) |
| 49 | self.logger.setLevel(self._LOGGING_LEVEL) |
| 50 | self.test_sequence = GdbRemoteTestSequence(self.logger) |
| 51 | self.set_inferior_startup_launch() |
| 52 | |
| 53 | # Uncomment this code to force only a single test to run (by name). |
| 54 | #if not re.search(r"P_", self._testMethodName): |
| 55 | # self.skipTest("focusing on one test") |
| 56 | |
    def reset_test_sequence(self):
        """Discard any accumulated expected packets and start a fresh sequence."""
        self.test_sequence = GdbRemoteTestSequence(self.logger)
| 59 | |
| 60 | def init_llgs_test(self): |
| 61 | self.debug_monitor_exe = get_lldb_gdbserver_exe() |
| 62 | if not self.debug_monitor_exe: |
| 63 | self.skipTest("lldb_gdbserver exe not found") |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 64 | self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id(), self.id()) |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 65 | |
| 66 | def init_debugserver_test(self): |
| 67 | self.debug_monitor_exe = get_debugserver_exe() |
| 68 | if not self.debug_monitor_exe: |
| 69 | self.skipTest("debugserver exe not found") |
| 70 | self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName) |
| 71 | |
| 72 | def create_socket(self): |
| 73 | sock = socket.socket() |
| 74 | logger = self.logger |
| 75 | |
| 76 | def shutdown_socket(): |
| 77 | if sock: |
| 78 | try: |
| 79 | # send the kill packet so lldb-gdbserver shuts down gracefully |
| 80 | sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET) |
| 81 | except: |
| 82 | logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0])) |
| 83 | |
| 84 | try: |
| 85 | sock.close() |
| 86 | except: |
| 87 | logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0])) |
| 88 | |
| 89 | self.addTearDownHook(shutdown_socket) |
| 90 | |
| 91 | sock.connect(('localhost', self.port)) |
| 92 | return sock |
| 93 | |
    def set_inferior_startup_launch(self):
        """Have the stub launch the inferior itself (the default mode)."""
        self._inferior_startup = self._STARTUP_LAUNCH
| 96 | |
    def set_inferior_startup_attach(self):
        """Have the stub attach to an already-running inferior process."""
        self._inferior_startup = self._STARTUP_ATTACH
| 99 | |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 100 | def launch_debug_monitor(self, attach_pid=None): |
| 101 | # Create the command line. |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 102 | commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port) |
| 103 | if attach_pid: |
| 104 | commandline += " --attach=%d" % attach_pid |
| 105 | |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 106 | # Start the server. |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 107 | server = pexpect.spawn(commandline) |
| 108 | |
| 109 | # Turn on logging for what the child sends back. |
| 110 | if self.TraceOn(): |
| 111 | server.logfile_read = sys.stdout |
| 112 | |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 113 | return server |
| 114 | |
| 115 | def connect_to_debug_monitor(self, attach_pid=None): |
| 116 | server = self.launch_debug_monitor(attach_pid=attach_pid) |
| 117 | |
| 118 | # Wait until we receive the server ready message before continuing. |
| 119 | server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port)) |
| 120 | |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 121 | # Schedule debug monitor to be shut down during teardown. |
| 122 | logger = self.logger |
| 123 | def shutdown_debug_monitor(): |
| 124 | try: |
| 125 | server.close() |
| 126 | except: |
| 127 | logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0])) |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 128 | self.addTearDownHook(shutdown_debug_monitor) |
| 129 | |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 130 | attempts = 0 |
| 131 | MAX_ATTEMPTS = 20 |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 132 | |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 133 | while attempts < MAX_ATTEMPTS: |
| 134 | # Create a socket to talk to the server |
| 135 | try: |
| 136 | self.sock = self.create_socket() |
| 137 | return server |
| 138 | except socket.error as serr: |
| 139 | # We're only trying to handle connection refused |
| 140 | if serr.errno != errno.ECONNREFUSED: |
| 141 | raise serr |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 142 | |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 143 | # Increment attempts. |
| 144 | print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS)) |
| 145 | attempts += 1 |
| 146 | |
| 147 | # And wait a second before next attempt. |
| 148 | time.sleep(1) |
| 149 | |
| 150 | raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts) |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 151 | |
| 152 | def launch_process_for_attach(self,inferior_args=None, sleep_seconds=3): |
| 153 | # We're going to start a child process that the debug monitor stub can later attach to. |
| 154 | # This process needs to be started so that it just hangs around for a while. We'll |
| 155 | # have it sleep. |
| 156 | exe_path = os.path.abspath("a.out") |
| 157 | |
| 158 | args = [exe_path] |
| 159 | if inferior_args: |
| 160 | args.extend(inferior_args) |
| 161 | if sleep_seconds: |
| 162 | args.append("sleep:%d" % sleep_seconds) |
| 163 | |
| 164 | return subprocess.Popen(args) |
| 165 | |
| 166 | def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3): |
| 167 | """Prep the debug monitor, the inferior, and the expected packet stream. |
| 168 | |
| 169 | Handle the separate cases of using the debug monitor in attach-to-inferior mode |
| 170 | and in launch-inferior mode. |
| 171 | |
| 172 | For attach-to-inferior mode, the inferior process is first started, then |
| 173 | the debug monitor is started in attach to pid mode (using --attach on the |
| 174 | stub command line), and the no-ack-mode setup is appended to the packet |
| 175 | stream. The packet stream is not yet executed, ready to have more expected |
| 176 | packet entries added to it. |
| 177 | |
| 178 | For launch-inferior mode, the stub is first started, then no ack mode is |
| 179 | setup on the expected packet stream, then the verified launch packets are added |
| 180 | to the expected socket stream. The packet stream is not yet executed, ready |
| 181 | to have more expected packet entries added to it. |
| 182 | |
| 183 | The return value is: |
| 184 | {inferior:<inferior>, server:<server>} |
| 185 | """ |
| 186 | inferior = None |
| 187 | attach_pid = None |
| 188 | |
| 189 | if self._inferior_startup == self._STARTUP_ATTACH: |
| 190 | # Launch the process that we'll use as the inferior. |
| 191 | inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds) |
| 192 | self.assertIsNotNone(inferior) |
| 193 | self.assertTrue(inferior.pid > 0) |
| 194 | attach_pid = inferior.pid |
| 195 | |
| 196 | # Launch the debug monitor stub, attaching to the inferior. |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 197 | server = self.connect_to_debug_monitor(attach_pid=attach_pid) |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 198 | self.assertIsNotNone(server) |
| 199 | |
| 200 | if self._inferior_startup == self._STARTUP_LAUNCH: |
| 201 | # Build launch args |
| 202 | launch_args = [os.path.abspath('a.out')] |
| 203 | if inferior_args: |
| 204 | launch_args.extend(inferior_args) |
| 205 | |
| 206 | # Build the expected protocol stream |
| 207 | self.add_no_ack_remote_stream() |
| 208 | if self._inferior_startup == self._STARTUP_LAUNCH: |
| 209 | self.add_verified_launch_packets(launch_args) |
| 210 | |
| 211 | return {"inferior":inferior, "server":server} |
| 212 | |
    def add_no_ack_remote_stream(self):
        """Append the QStartNoAckMode handshake to the expected packet stream."""
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)
| 221 | |
    def add_verified_launch_packets(self, launch_args):
        """Append an A (launch) packet built from launch_args plus the
        qLaunchSuccess verification exchange."""
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)
| 229 | |
    def add_thread_suffix_request_packets(self):
        """Append the QThreadSuffixSupported request and its OK reply."""
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)
| 235 | |
    def add_process_info_collection_packets(self):
        """Append a qProcessInfo query; the reply payload is captured under
        the "process_info_raw" context key."""
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)
| 241 | |
    # qProcessInfo response keys the tests understand; any other key in a
    # response fails parse_process_info_response().
    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]
| 256 | |
| 257 | def parse_process_info_response(self, context): |
| 258 | # Ensure we have a process info response. |
| 259 | self.assertIsNotNone(context) |
| 260 | process_info_raw = context.get("process_info_raw") |
| 261 | self.assertIsNotNone(process_info_raw) |
| 262 | |
| 263 | # Pull out key:value; pairs. |
| 264 | process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) } |
| 265 | |
| 266 | # Validate keys are known. |
| 267 | for (key, val) in process_info_dict.items(): |
| 268 | self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS) |
| 269 | self.assertIsNotNone(val) |
| 270 | |
| 271 | return process_info_dict |
| 272 | |
    def add_register_info_collection_packets(self):
        """Append a qRegisterInfo0, qRegisterInfo1, ... sweep; replies are
        collected under "reg_info_responses" until an error reply ends it."""
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)
| 279 | |
| 280 | def parse_register_info_packets(self, context): |
| 281 | """Return an array of register info dictionaries, one per register info.""" |
| 282 | reg_info_responses = context.get("reg_info_responses") |
| 283 | self.assertIsNotNone(reg_info_responses) |
| 284 | |
| 285 | # Parse register infos. |
| 286 | return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses] |
| 287 | |
Todd Fiala | 50a211b | 2014-06-14 22:00:36 +0000 | [diff] [blame] | 288 | def expect_gdbremote_sequence(self, timeout_seconds=None): |
Todd Fiala | 8aae4f4 | 2014-06-13 23:34:17 +0000 | [diff] [blame] | 289 | if not timeout_seconds: |
| 290 | timeout_seconds = self._TIMEOUT_SECONDS |
| 291 | return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger) |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 292 | |
    # qRegisterInfo response keys the tests understand; anything else fails
    # assert_valid_reg_info().
    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]
| 307 | |
| 308 | def assert_valid_reg_info(self, reg_info): |
| 309 | # Assert we know about all the reginfo keys parsed. |
| 310 | for key in reg_info: |
| 311 | self.assertTrue(key in self._KNOWN_REGINFO_KEYS) |
| 312 | |
| 313 | # Check the bare-minimum expected set of register info keys. |
| 314 | self.assertTrue("name" in reg_info) |
| 315 | self.assertTrue("bitsize" in reg_info) |
| 316 | self.assertTrue("offset" in reg_info) |
| 317 | self.assertTrue("encoding" in reg_info) |
| 318 | self.assertTrue("format" in reg_info) |
| 319 | |
| 320 | def find_pc_reg_info(self, reg_infos): |
| 321 | lldb_reg_index = 0 |
| 322 | for reg_info in reg_infos: |
| 323 | if ("generic" in reg_info) and (reg_info["generic"] == "pc"): |
| 324 | return (lldb_reg_index, reg_info) |
| 325 | lldb_reg_index += 1 |
| 326 | |
| 327 | return (None, None) |
| 328 | |
| 329 | def add_lldb_register_index(self, reg_infos): |
| 330 | """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry. |
| 331 | |
| 332 | We'll use this when we want to call packets like P/p with a register index but do so |
| 333 | on only a subset of the full register info set. |
| 334 | """ |
| 335 | self.assertIsNotNone(reg_infos) |
| 336 | |
| 337 | reg_index = 0 |
| 338 | for reg_info in reg_infos: |
| 339 | reg_info["lldb_register_index"] = reg_index |
| 340 | reg_index += 1 |
| 341 | |
    def add_query_memory_region_packets(self, address):
        """Append a qMemoryRegionInfo query for address; the reply is
        captured under the "memory_region_response" context key."""
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)
| 347 | |
Todd Fiala | c30281a | 2014-06-14 03:03:23 +0000 | [diff] [blame] | 348 | def parse_key_val_dict(self, key_val_text, allow_dupes=True): |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 349 | self.assertIsNotNone(key_val_text) |
| 350 | kv_dict = {} |
| 351 | for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text): |
Todd Fiala | c30281a | 2014-06-14 03:03:23 +0000 | [diff] [blame] | 352 | key = match.group(1) |
| 353 | val = match.group(2) |
| 354 | if key in kv_dict: |
| 355 | if allow_dupes: |
| 356 | if type(kv_dict[key]) == list: |
| 357 | kv_dict[key].append(val) |
| 358 | else: |
| 359 | # Promote to list |
| 360 | kv_dict[key] = [kv_dict[key], val] |
| 361 | else: |
| 362 | self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict)) |
| 363 | else: |
| 364 | kv_dict[key] = val |
Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 365 | return kv_dict |
| 366 | |
| 367 | def parse_memory_region_packet(self, context): |
| 368 | # Ensure we have a context. |
| 369 | self.assertIsNotNone(context.get("memory_region_response")) |
| 370 | |
| 371 | # Pull out key:value; pairs. |
| 372 | mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response")) |
| 373 | |
| 374 | # Validate keys are known. |
| 375 | for (key, val) in mem_region_dict.items(): |
| 376 | self.assertTrue(key in ["start", "size", "permissions", "error"]) |
| 377 | self.assertIsNotNone(val) |
| 378 | |
| 379 | # Return the dictionary of key-value pairs for the memory region. |
| 380 | return mem_region_dict |
| 381 | |
| 382 | def assert_address_within_memory_region(self, test_address, mem_region_dict): |
| 383 | self.assertIsNotNone(mem_region_dict) |
| 384 | self.assertTrue("start" in mem_region_dict) |
| 385 | self.assertTrue("size" in mem_region_dict) |
| 386 | |
| 387 | range_start = int(mem_region_dict["start"], 16) |
| 388 | range_size = int(mem_region_dict["size"], 16) |
| 389 | range_end = range_start + range_size |
| 390 | |
| 391 | if test_address < range_start: |
| 392 | self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) |
| 393 | elif test_address >= range_end: |
| 394 | self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) |
| 395 | |
    def add_threadinfo_collection_packets(self):
        """Append a qfThreadInfo/qsThreadInfo sweep; replies accumulate under
        "threadinfo_responses" until the terminating "l" reply."""
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)
| 402 | |
| 403 | def parse_threadinfo_packets(self, context): |
| 404 | """Return an array of thread ids (decimal ints), one per thread.""" |
| 405 | threadinfo_responses = context.get("threadinfo_responses") |
| 406 | self.assertIsNotNone(threadinfo_responses) |
| 407 | |
| 408 | thread_ids = [] |
| 409 | for threadinfo_response in threadinfo_responses: |
| 410 | new_thread_infos = parse_threadinfo_response(threadinfo_response) |
| 411 | thread_ids.extend(new_thread_infos) |
| 412 | return thread_ids |
| 413 | |
| 414 | def wait_for_thread_count(self, thread_count, timeout_seconds=3): |
| 415 | start_time = time.time() |
| 416 | timeout_time = start_time + timeout_seconds |
| 417 | |
| 418 | actual_thread_count = 0 |
| 419 | while actual_thread_count < thread_count: |
| 420 | self.reset_test_sequence() |
| 421 | self.add_threadinfo_collection_packets() |
| 422 | |
| 423 | context = self.expect_gdbremote_sequence() |
| 424 | self.assertIsNotNone(context) |
| 425 | |
| 426 | threads = self.parse_threadinfo_packets(context) |
| 427 | self.assertIsNotNone(threads) |
| 428 | |
| 429 | actual_thread_count = len(threads) |
| 430 | |
| 431 | if time.time() > timeout_time: |
| 432 | raise Exception( |
| 433 | 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format( |
| 434 | timeout_seconds, thread_count, actual_thread_count)) |
| 435 | |
| 436 | return threads |
| 437 | |
    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        """Append a Z0 (set software breakpoint) exchange at address and,
        when do_continue is set, a continue plus the expected stop reply.

        The stop reply captures "stop_signo" and "stop_thread_id".
        """
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)
| 453 | |
    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        """Append a z0 (remove software breakpoint) exchange for address."""
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
             ], True)
| 461 | |
    def add_qSupported_packets(self):
        """Append a qSupported query; the raw reply is captured under the
        "qSupported_response" context key."""
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
             ], True)
| 467 | |
    # qSupported stub features the tests recognize; parse_qSupported_response()
    # raises on any feature name not listed here.
    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]
| 478 | |
    def parse_qSupported_response(self, context):
        """Parse a captured qSupported reply into a feature dict.

        Raises Exception if the stub reports a feature name not present in
        _KNOWN_QSUPPORTED_STUB_FEATURES, or a malformed bare feature.
        """
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                # Bare feature: the trailing +/-/? character becomes the value.
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict
| 508 | |
| 509 | def run_process_then_stop(self, run_seconds=1): |
| 510 | # Tell the stub to continue. |
| 511 | self.test_sequence.add_log_lines( |
| 512 | ["read packet: $vCont;c#00"], |
| 513 | True) |
| 514 | context = self.expect_gdbremote_sequence() |
| 515 | |
| 516 | # Wait for run_seconds. |
| 517 | time.sleep(run_seconds) |
| 518 | |
| 519 | # Send an interrupt, capture a T response. |
| 520 | self.reset_test_sequence() |
| 521 | self.test_sequence.add_log_lines( |
| 522 | ["read packet: {}".format(chr(03)), |
| 523 | {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }], |
| 524 | True) |
| 525 | context = self.expect_gdbremote_sequence() |
| 526 | self.assertIsNotNone(context) |
| 527 | self.assertIsNotNone(context.get("stop_result")) |
| 528 | |
| 529 | return context |
| 530 | |
| 531 | def select_modifiable_register(self, reg_infos): |
| 532 | """Find a register that can be read/written freely.""" |
| 533 | PREFERRED_REGISTER_NAMES = sets.Set(["rax",]) |
| 534 | |
| 535 | # First check for the first register from the preferred register name set. |
| 536 | alternative_register_index = None |
| 537 | |
| 538 | self.assertIsNotNone(reg_infos) |
| 539 | for reg_info in reg_infos: |
| 540 | if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES): |
| 541 | # We found a preferred register. Use it. |
| 542 | return reg_info["lldb_register_index"] |
| 543 | if ("generic" in reg_info) and (reg_info["generic"] == "fp"): |
| 544 | # A frame pointer register will do as a register to modify temporarily. |
| 545 | alternative_register_index = reg_info["lldb_register_index"] |
| 546 | |
| 547 | # We didn't find a preferred register. Return whatever alternative register |
| 548 | # we found, if any. |
| 549 | return alternative_register_index |
| 550 | |
| 551 | def extract_registers_from_stop_notification(self, stop_key_vals_text): |
| 552 | self.assertIsNotNone(stop_key_vals_text) |
| 553 | kv_dict = self.parse_key_val_dict(stop_key_vals_text) |
| 554 | |
| 555 | registers = {} |
| 556 | for (key, val) in kv_dict.items(): |
| 557 | if re.match(r"^[0-9a-fA-F]+", key): |
| 558 | registers[int(key, 16)] = val |
| 559 | return registers |
| 560 | |
| 561 | def gather_register_infos(self): |
| 562 | self.reset_test_sequence() |
| 563 | self.add_register_info_collection_packets() |
| 564 | |
| 565 | context = self.expect_gdbremote_sequence() |
| 566 | self.assertIsNotNone(context) |
| 567 | |
| 568 | reg_infos = self.parse_register_info_packets(context) |
| 569 | self.assertIsNotNone(reg_infos) |
| 570 | self.add_lldb_register_index(reg_infos) |
| 571 | |
| 572 | return reg_infos |
| 573 | |
| 574 | def find_generic_register_with_name(self, reg_infos, generic_name): |
| 575 | self.assertIsNotNone(reg_infos) |
| 576 | for reg_info in reg_infos: |
| 577 | if ("generic" in reg_info) and (reg_info["generic"] == generic_name): |
| 578 | return reg_info |
| 579 | return None |
| 580 | |
| 581 | |