Todd Fiala | e50b2e4 | 2014-06-13 19:11:33 +0000 | [diff] [blame] | 1 | """ |
| 2 | Base class for gdb-remote test cases. |
| 3 | """ |
| 4 | |
import logging
import os.path
import platform
import re
import sets
import signal
import socket
import subprocess
import sys
import time

import pexpect
import unittest2

from lldbtest import *
from lldbgdbserverutils import *
| 18 | |
class GdbRemoteTestCaseBase(TestBase):
    """Base class for tests that speak the gdb-remote protocol to a debug monitor stub."""

    mydir = TestBase.compute_mydir(__file__)

    # TCP port the debug monitor stub listens on (and this test connects to).
    port = 12345

    # Default timeout (seconds) used when replaying an expected packet sequence.
    _TIMEOUT_SECONDS = 5

    # Pre-encoded gdb-remote "k" (kill) packet, checksum included.
    _GDBREMOTE_KILL_PACKET = "$k#6b"

    # Logging level for this class's logger; switch to the DEBUG line below
    # for verbose packet-level diagnostics.
    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Inferior startup modes; see set_inferior_startup_launch()/..._attach().
    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB Signal numbers that are not target-specific used for common exceptions
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96
| 42 | |
| 43 | def setUp(self): |
| 44 | TestBase.setUp(self) |
| 45 | FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s' |
| 46 | logging.basicConfig(format=FORMAT) |
| 47 | self.logger = logging.getLogger(__name__) |
| 48 | self.logger.setLevel(self._LOGGING_LEVEL) |
| 49 | self.test_sequence = GdbRemoteTestSequence(self.logger) |
| 50 | self.set_inferior_startup_launch() |
| 51 | |
| 52 | # Uncomment this code to force only a single test to run (by name). |
| 53 | #if not re.search(r"P_", self._testMethodName): |
| 54 | # self.skipTest("focusing on one test") |
| 55 | |
    def reset_test_sequence(self):
        """Discard any accumulated expected packets and start a fresh sequence."""
        self.test_sequence = GdbRemoteTestSequence(self.logger)
| 58 | |
    def init_llgs_test(self):
        """Target lldb-gdbserver as the debug monitor; skip the test if it isn't found."""
        self.debug_monitor_exe = get_lldb_gdbserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb_gdbserver exe not found")
        # lldb-gdbserver needs no extra command-line arguments.
        self.debug_monitor_extra_args = ""
| 64 | |
    def init_debugserver_test(self):
        """Target debugserver as the debug monitor; skip the test if it isn't found.

        Enables debugserver packet logging to /tmp/packets-<test name>.log
        (log flags 0x800000).
        """
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
| 70 | |
| 71 | def create_socket(self): |
| 72 | sock = socket.socket() |
| 73 | logger = self.logger |
| 74 | |
| 75 | def shutdown_socket(): |
| 76 | if sock: |
| 77 | try: |
| 78 | # send the kill packet so lldb-gdbserver shuts down gracefully |
| 79 | sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET) |
| 80 | except: |
| 81 | logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0])) |
| 82 | |
| 83 | try: |
| 84 | sock.close() |
| 85 | except: |
| 86 | logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0])) |
| 87 | |
| 88 | self.addTearDownHook(shutdown_socket) |
| 89 | |
| 90 | sock.connect(('localhost', self.port)) |
| 91 | return sock |
| 92 | |
    def set_inferior_startup_launch(self):
        """Have prep_debug_monitor_and_inferior() launch the inferior via the stub."""
        self._inferior_startup = self._STARTUP_LAUNCH
| 95 | |
    def set_inferior_startup_attach(self):
        """Have prep_debug_monitor_and_inferior() attach the stub to a pre-launched inferior."""
        self._inferior_startup = self._STARTUP_ATTACH
| 98 | |
| 99 | def start_server(self, attach_pid=None): |
| 100 | # Create the command line |
| 101 | commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port) |
| 102 | if attach_pid: |
| 103 | commandline += " --attach=%d" % attach_pid |
| 104 | |
| 105 | # start the server |
| 106 | server = pexpect.spawn(commandline) |
| 107 | |
| 108 | # Turn on logging for what the child sends back. |
| 109 | if self.TraceOn(): |
| 110 | server.logfile_read = sys.stdout |
| 111 | |
| 112 | # Schedule debug monitor to be shut down during teardown. |
| 113 | logger = self.logger |
| 114 | def shutdown_debug_monitor(): |
| 115 | try: |
| 116 | server.close() |
| 117 | except: |
| 118 | logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0])) |
| 119 | |
| 120 | self.addTearDownHook(shutdown_debug_monitor) |
| 121 | |
| 122 | # Wait until we receive the server ready message before continuing. |
| 123 | server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port)) |
| 124 | |
| 125 | # Create a socket to talk to the server |
| 126 | self.sock = self.create_socket() |
| 127 | |
| 128 | return server |
| 129 | |
| 130 | def launch_process_for_attach(self,inferior_args=None, sleep_seconds=3): |
| 131 | # We're going to start a child process that the debug monitor stub can later attach to. |
| 132 | # This process needs to be started so that it just hangs around for a while. We'll |
| 133 | # have it sleep. |
| 134 | exe_path = os.path.abspath("a.out") |
| 135 | |
| 136 | args = [exe_path] |
| 137 | if inferior_args: |
| 138 | args.extend(inferior_args) |
| 139 | if sleep_seconds: |
| 140 | args.append("sleep:%d" % sleep_seconds) |
| 141 | |
| 142 | return subprocess.Popen(args) |
| 143 | |
| 144 | def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3): |
| 145 | """Prep the debug monitor, the inferior, and the expected packet stream. |
| 146 | |
| 147 | Handle the separate cases of using the debug monitor in attach-to-inferior mode |
| 148 | and in launch-inferior mode. |
| 149 | |
| 150 | For attach-to-inferior mode, the inferior process is first started, then |
| 151 | the debug monitor is started in attach to pid mode (using --attach on the |
| 152 | stub command line), and the no-ack-mode setup is appended to the packet |
| 153 | stream. The packet stream is not yet executed, ready to have more expected |
| 154 | packet entries added to it. |
| 155 | |
| 156 | For launch-inferior mode, the stub is first started, then no ack mode is |
| 157 | setup on the expected packet stream, then the verified launch packets are added |
| 158 | to the expected socket stream. The packet stream is not yet executed, ready |
| 159 | to have more expected packet entries added to it. |
| 160 | |
| 161 | The return value is: |
| 162 | {inferior:<inferior>, server:<server>} |
| 163 | """ |
| 164 | inferior = None |
| 165 | attach_pid = None |
| 166 | |
| 167 | if self._inferior_startup == self._STARTUP_ATTACH: |
| 168 | # Launch the process that we'll use as the inferior. |
| 169 | inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds) |
| 170 | self.assertIsNotNone(inferior) |
| 171 | self.assertTrue(inferior.pid > 0) |
| 172 | attach_pid = inferior.pid |
| 173 | |
| 174 | # Launch the debug monitor stub, attaching to the inferior. |
| 175 | server = self.start_server(attach_pid=attach_pid) |
| 176 | self.assertIsNotNone(server) |
| 177 | |
| 178 | if self._inferior_startup == self._STARTUP_LAUNCH: |
| 179 | # Build launch args |
| 180 | launch_args = [os.path.abspath('a.out')] |
| 181 | if inferior_args: |
| 182 | launch_args.extend(inferior_args) |
| 183 | |
| 184 | # Build the expected protocol stream |
| 185 | self.add_no_ack_remote_stream() |
| 186 | if self._inferior_startup == self._STARTUP_LAUNCH: |
| 187 | self.add_verified_launch_packets(launch_args) |
| 188 | |
| 189 | return {"inferior":inferior, "server":server} |
| 190 | |
    def add_no_ack_remote_stream(self):
        """Queue the packet exchange that switches the connection to no-ack mode.

        After the stub OKs QStartNoAckMode, neither side sends '+' acks for
        subsequent packets.
        """
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)
| 199 | |
    def add_verified_launch_packets(self, launch_args):
        """Queue packets that launch the inferior (A packet) and verify success."""
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)
| 207 | |
    def add_thread_suffix_request_packets(self):
        """Queue packets asking the stub to accept thread-suffixed register packets."""
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)
| 213 | |
    def add_process_info_collection_packets(self):
        """Queue a qProcessInfo query, capturing the raw reply as "process_info_raw"."""
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#00", "capture":{1:"process_info_raw"} }],
            True)
| 219 | |
    # Keys a stub may report in its qProcessInfo response; anything else is
    # rejected by parse_process_info_response().
    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]
| 234 | |
| 235 | def parse_process_info_response(self, context): |
| 236 | # Ensure we have a process info response. |
| 237 | self.assertIsNotNone(context) |
| 238 | process_info_raw = context.get("process_info_raw") |
| 239 | self.assertIsNotNone(process_info_raw) |
| 240 | |
| 241 | # Pull out key:value; pairs. |
| 242 | process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) } |
| 243 | |
| 244 | # Validate keys are known. |
| 245 | for (key, val) in process_info_dict.items(): |
| 246 | self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS) |
| 247 | self.assertIsNotNone(val) |
| 248 | |
| 249 | return process_info_dict |
| 250 | |
    def add_register_info_collection_packets(self):
        """Queue qRegisterInfo{i} queries until the stub reports an error (end of list).

        Raw responses are saved under the "reg_info_responses" context key.
        """
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)
| 257 | |
| 258 | def parse_register_info_packets(self, context): |
| 259 | """Return an array of register info dictionaries, one per register info.""" |
| 260 | reg_info_responses = context.get("reg_info_responses") |
| 261 | self.assertIsNotNone(reg_info_responses) |
| 262 | |
| 263 | # Parse register infos. |
| 264 | return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses] |
| 265 | |
    def expect_gdbremote_sequence(self):
        """Replay the accumulated packet sequence against the stub; return the capture context."""
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, self._TIMEOUT_SECONDS, self.logger)
| 268 | |
    # Keys a stub may report in a qRegisterInfo response; anything else fails
    # assert_valid_reg_info().
    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]
| 283 | |
| 284 | def assert_valid_reg_info(self, reg_info): |
| 285 | # Assert we know about all the reginfo keys parsed. |
| 286 | for key in reg_info: |
| 287 | self.assertTrue(key in self._KNOWN_REGINFO_KEYS) |
| 288 | |
| 289 | # Check the bare-minimum expected set of register info keys. |
| 290 | self.assertTrue("name" in reg_info) |
| 291 | self.assertTrue("bitsize" in reg_info) |
| 292 | self.assertTrue("offset" in reg_info) |
| 293 | self.assertTrue("encoding" in reg_info) |
| 294 | self.assertTrue("format" in reg_info) |
| 295 | |
| 296 | def find_pc_reg_info(self, reg_infos): |
| 297 | lldb_reg_index = 0 |
| 298 | for reg_info in reg_infos: |
| 299 | if ("generic" in reg_info) and (reg_info["generic"] == "pc"): |
| 300 | return (lldb_reg_index, reg_info) |
| 301 | lldb_reg_index += 1 |
| 302 | |
| 303 | return (None, None) |
| 304 | |
| 305 | def add_lldb_register_index(self, reg_infos): |
| 306 | """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry. |
| 307 | |
| 308 | We'll use this when we want to call packets like P/p with a register index but do so |
| 309 | on only a subset of the full register info set. |
| 310 | """ |
| 311 | self.assertIsNotNone(reg_infos) |
| 312 | |
| 313 | reg_index = 0 |
| 314 | for reg_info in reg_infos: |
| 315 | reg_info["lldb_register_index"] = reg_index |
| 316 | reg_index += 1 |
| 317 | |
    def add_query_memory_region_packets(self, address):
        """Queue a qMemoryRegionInfo query for address, capturing the raw reply."""
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)
| 323 | |
| 324 | def parse_key_val_dict(self, key_val_text): |
| 325 | self.assertIsNotNone(key_val_text) |
| 326 | kv_dict = {} |
| 327 | for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text): |
| 328 | kv_dict[match.group(1)] = match.group(2) |
| 329 | return kv_dict |
| 330 | |
| 331 | def parse_memory_region_packet(self, context): |
| 332 | # Ensure we have a context. |
| 333 | self.assertIsNotNone(context.get("memory_region_response")) |
| 334 | |
| 335 | # Pull out key:value; pairs. |
| 336 | mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response")) |
| 337 | |
| 338 | # Validate keys are known. |
| 339 | for (key, val) in mem_region_dict.items(): |
| 340 | self.assertTrue(key in ["start", "size", "permissions", "error"]) |
| 341 | self.assertIsNotNone(val) |
| 342 | |
| 343 | # Return the dictionary of key-value pairs for the memory region. |
| 344 | return mem_region_dict |
| 345 | |
| 346 | def assert_address_within_memory_region(self, test_address, mem_region_dict): |
| 347 | self.assertIsNotNone(mem_region_dict) |
| 348 | self.assertTrue("start" in mem_region_dict) |
| 349 | self.assertTrue("size" in mem_region_dict) |
| 350 | |
| 351 | range_start = int(mem_region_dict["start"], 16) |
| 352 | range_size = int(mem_region_dict["size"], 16) |
| 353 | range_end = range_start + range_size |
| 354 | |
| 355 | if test_address < range_start: |
| 356 | self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) |
| 357 | elif test_address >= range_end: |
| 358 | self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) |
| 359 | |
    def add_threadinfo_collection_packets(self):
        """Queue qfThreadInfo/qsThreadInfo queries, collecting replies until the 'l' terminator.

        Raw responses are saved under the "threadinfo_responses" context key.
        """
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)
| 366 | |
| 367 | def parse_threadinfo_packets(self, context): |
| 368 | """Return an array of thread ids (decimal ints), one per thread.""" |
| 369 | threadinfo_responses = context.get("threadinfo_responses") |
| 370 | self.assertIsNotNone(threadinfo_responses) |
| 371 | |
| 372 | thread_ids = [] |
| 373 | for threadinfo_response in threadinfo_responses: |
| 374 | new_thread_infos = parse_threadinfo_response(threadinfo_response) |
| 375 | thread_ids.extend(new_thread_infos) |
| 376 | return thread_ids |
| 377 | |
| 378 | def wait_for_thread_count(self, thread_count, timeout_seconds=3): |
| 379 | start_time = time.time() |
| 380 | timeout_time = start_time + timeout_seconds |
| 381 | |
| 382 | actual_thread_count = 0 |
| 383 | while actual_thread_count < thread_count: |
| 384 | self.reset_test_sequence() |
| 385 | self.add_threadinfo_collection_packets() |
| 386 | |
| 387 | context = self.expect_gdbremote_sequence() |
| 388 | self.assertIsNotNone(context) |
| 389 | |
| 390 | threads = self.parse_threadinfo_packets(context) |
| 391 | self.assertIsNotNone(threads) |
| 392 | |
| 393 | actual_thread_count = len(threads) |
| 394 | |
| 395 | if time.time() > timeout_time: |
| 396 | raise Exception( |
| 397 | 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format( |
| 398 | timeout_seconds, thread_count, actual_thread_count)) |
| 399 | |
| 400 | return threads |
| 401 | |
    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        """Queue packets that set a software breakpoint (Z0) at address.

        If do_continue is True, also continue the inferior and expect a
        breakpoint stop report, capturing "stop_signo" and "stop_thread_id".
        """
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)
| 417 | |
    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        """Queue packets that remove a software breakpoint (z0) at address."""
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
             ], True)
| 425 | |
    def add_qSupported_packets(self):
        """Queue a qSupported query, capturing the stub's feature list as "qSupported_response"."""
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)
| 431 | |
    # Feature names a stub may report in its qSupported response; anything
    # else causes parse_qSupported_response() to raise.
    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]
| 440 | |
    def parse_qSupported_response(self, context):
        """Parse the captured qSupported reply into a feature dict.

        For key=val entries, the dict maps key to val.  For feature+,
        feature- and feature? entries, the trailing +/-/? is stripped from
        the key and stored as the value.

        Raises:
            Exception: on a malformed entry or an unknown stub feature.
        """
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                # Flag form: must be at least one feature char plus +/-/?.
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict
| 470 | |
| 471 | def run_process_then_stop(self, run_seconds=1): |
| 472 | # Tell the stub to continue. |
| 473 | self.test_sequence.add_log_lines( |
| 474 | ["read packet: $vCont;c#00"], |
| 475 | True) |
| 476 | context = self.expect_gdbremote_sequence() |
| 477 | |
| 478 | # Wait for run_seconds. |
| 479 | time.sleep(run_seconds) |
| 480 | |
| 481 | # Send an interrupt, capture a T response. |
| 482 | self.reset_test_sequence() |
| 483 | self.test_sequence.add_log_lines( |
| 484 | ["read packet: {}".format(chr(03)), |
| 485 | {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }], |
| 486 | True) |
| 487 | context = self.expect_gdbremote_sequence() |
| 488 | self.assertIsNotNone(context) |
| 489 | self.assertIsNotNone(context.get("stop_result")) |
| 490 | |
| 491 | return context |
| 492 | |
| 493 | def select_modifiable_register(self, reg_infos): |
| 494 | """Find a register that can be read/written freely.""" |
| 495 | PREFERRED_REGISTER_NAMES = sets.Set(["rax",]) |
| 496 | |
| 497 | # First check for the first register from the preferred register name set. |
| 498 | alternative_register_index = None |
| 499 | |
| 500 | self.assertIsNotNone(reg_infos) |
| 501 | for reg_info in reg_infos: |
| 502 | if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES): |
| 503 | # We found a preferred register. Use it. |
| 504 | return reg_info["lldb_register_index"] |
| 505 | if ("generic" in reg_info) and (reg_info["generic"] == "fp"): |
| 506 | # A frame pointer register will do as a register to modify temporarily. |
| 507 | alternative_register_index = reg_info["lldb_register_index"] |
| 508 | |
| 509 | # We didn't find a preferred register. Return whatever alternative register |
| 510 | # we found, if any. |
| 511 | return alternative_register_index |
| 512 | |
| 513 | def extract_registers_from_stop_notification(self, stop_key_vals_text): |
| 514 | self.assertIsNotNone(stop_key_vals_text) |
| 515 | kv_dict = self.parse_key_val_dict(stop_key_vals_text) |
| 516 | |
| 517 | registers = {} |
| 518 | for (key, val) in kv_dict.items(): |
| 519 | if re.match(r"^[0-9a-fA-F]+", key): |
| 520 | registers[int(key, 16)] = val |
| 521 | return registers |
| 522 | |
| 523 | def gather_register_infos(self): |
| 524 | self.reset_test_sequence() |
| 525 | self.add_register_info_collection_packets() |
| 526 | |
| 527 | context = self.expect_gdbremote_sequence() |
| 528 | self.assertIsNotNone(context) |
| 529 | |
| 530 | reg_infos = self.parse_register_info_packets(context) |
| 531 | self.assertIsNotNone(reg_infos) |
| 532 | self.add_lldb_register_index(reg_infos) |
| 533 | |
| 534 | return reg_infos |
| 535 | |
| 536 | def find_generic_register_with_name(self, reg_infos, generic_name): |
| 537 | self.assertIsNotNone(reg_infos) |
| 538 | for reg_info in reg_infos: |
| 539 | if ("generic" in reg_info) and (reg_info["generic"] == generic_name): |
| 540 | return reg_info |
| 541 | return None |
| 542 | |
| 543 | |