blob: de9b4818717c1928ca450bafabb0571b6562a3c7 [file] [log] [blame]
Todd Fialae50b2e42014-06-13 19:11:33 +00001"""
2Base class for gdb-remote test cases.
3"""
4
Todd Fiala8aae4f42014-06-13 23:34:17 +00005import errno
Todd Fialae50b2e42014-06-13 19:11:33 +00006import unittest2
7import pexpect
8import platform
9import sets
10import signal
11import socket
12import subprocess
13import sys
14import time
15from lldbtest import *
16from lldbgdbserverutils import *
17import logging
18import os.path
19
class GdbRemoteTestCaseBase(TestBase):
    """Base class for gdb-remote protocol test cases.

    Provides helpers for launching or attaching to a debug monitor stub
    (lldb-gdbserver or debugserver), building expected gdb-remote packet
    sequences, and parsing stub responses.
    """

    mydir = TestBase.compute_mydir(__file__)

    # TCP port the debug monitor listens on and tests connect to.
    port = 12345

    # Default timeout (seconds) when expecting a gdb-remote packet sequence.
    _TIMEOUT_SECONDS = 5

    # The gdb-remote "k" (kill) packet, with its two-hex-digit checksum.
    _GDBREMOTE_KILL_PACKET = "$k#6b"

    # Logging verbosity for the per-test logger created in setUp().
    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Inferior startup modes selected by set_inferior_startup_*().
    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB Signal numbers that are not target-specific used for common exceptions
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96
43
44 def setUp(self):
45 TestBase.setUp(self)
46 FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
47 logging.basicConfig(format=FORMAT)
48 self.logger = logging.getLogger(__name__)
49 self.logger.setLevel(self._LOGGING_LEVEL)
50 self.test_sequence = GdbRemoteTestSequence(self.logger)
51 self.set_inferior_startup_launch()
52
53 # Uncomment this code to force only a single test to run (by name).
54 #if not re.search(r"P_", self._testMethodName):
55 # self.skipTest("focusing on one test")
56
57 def reset_test_sequence(self):
58 self.test_sequence = GdbRemoteTestSequence(self.logger)
59
60 def init_llgs_test(self):
61 self.debug_monitor_exe = get_lldb_gdbserver_exe()
62 if not self.debug_monitor_exe:
63 self.skipTest("lldb_gdbserver exe not found")
Todd Fiala8aae4f42014-06-13 23:34:17 +000064 self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id(), self.id())
Todd Fialae50b2e42014-06-13 19:11:33 +000065
66 def init_debugserver_test(self):
67 self.debug_monitor_exe = get_debugserver_exe()
68 if not self.debug_monitor_exe:
69 self.skipTest("debugserver exe not found")
70 self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
71
72 def create_socket(self):
73 sock = socket.socket()
74 logger = self.logger
75
76 def shutdown_socket():
77 if sock:
78 try:
79 # send the kill packet so lldb-gdbserver shuts down gracefully
80 sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
81 except:
82 logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))
83
84 try:
85 sock.close()
86 except:
87 logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))
88
89 self.addTearDownHook(shutdown_socket)
90
91 sock.connect(('localhost', self.port))
92 return sock
93
    def set_inferior_startup_launch(self):
        """Select launch mode: the stub will launch the inferior itself."""
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        """Select attach mode: the stub will attach to an existing inferior."""
        self._inferior_startup = self._STARTUP_ATTACH
99
Todd Fiala8aae4f42014-06-13 23:34:17 +0000100 def launch_debug_monitor(self, attach_pid=None):
101 # Create the command line.
Todd Fialae50b2e42014-06-13 19:11:33 +0000102 commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
103 if attach_pid:
104 commandline += " --attach=%d" % attach_pid
105
Todd Fiala8aae4f42014-06-13 23:34:17 +0000106 # Start the server.
Todd Fialae50b2e42014-06-13 19:11:33 +0000107 server = pexpect.spawn(commandline)
108
109 # Turn on logging for what the child sends back.
110 if self.TraceOn():
111 server.logfile_read = sys.stdout
112
Todd Fiala8aae4f42014-06-13 23:34:17 +0000113 return server
114
    def connect_to_debug_monitor(self, attach_pid=None):
        """Launch the debug monitor and connect a socket to it.

        Waits for the stub's "Listening to port ..." banner, schedules the
        pexpect child for shutdown at teardown, then retries the socket
        connection (up to 20 times, one second apart) while the stub's
        listener comes up. Returns the pexpect server object; the connected
        socket is stored in self.sock.
        """
        server = self.launch_debug_monitor(attach_pid=attach_pid)

        # Wait until we receive the server ready message before continuing.
        server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))

        # Schedule debug monitor to be shut down during teardown.
        logger = self.logger
        def shutdown_debug_monitor():
            try:
                server.close()
            except:
                logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
        self.addTearDownHook(shutdown_debug_monitor)

        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            # Create a socket to talk to the server
            try:
                self.sock = self.create_socket()
                return server
            except socket.error as serr:
                # We're only trying to handle connection refused; anything
                # else is a real failure and propagates.
                if serr.errno != errno.ECONNREFUSED:
                    raise serr

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a second before next attempt.
            time.sleep(1)

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)
Todd Fialae50b2e42014-06-13 19:11:33 +0000151
152 def launch_process_for_attach(self,inferior_args=None, sleep_seconds=3):
153 # We're going to start a child process that the debug monitor stub can later attach to.
154 # This process needs to be started so that it just hangs around for a while. We'll
155 # have it sleep.
156 exe_path = os.path.abspath("a.out")
157
158 args = [exe_path]
159 if inferior_args:
160 args.extend(inferior_args)
161 if sleep_seconds:
162 args.append("sleep:%d" % sleep_seconds)
163
164 return subprocess.Popen(args)
165
    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach to pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream.  The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no ack mode is
        setup on the expected packet stream, then the verified launch packets are added
        to the expected socket stream.  The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior.
        # attach_pid is None in launch mode, so no --attach is passed.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}
212
213 def add_no_ack_remote_stream(self):
214 self.test_sequence.add_log_lines(
215 ["read packet: +",
216 "read packet: $QStartNoAckMode#b0",
217 "send packet: +",
218 "send packet: $OK#9a",
219 "read packet: +"],
220 True)
221
222 def add_verified_launch_packets(self, launch_args):
223 self.test_sequence.add_log_lines(
224 ["read packet: %s" % build_gdbremote_A_packet(launch_args),
225 "send packet: $OK#00",
226 "read packet: $qLaunchSuccess#a5",
227 "send packet: $OK#00"],
228 True)
229
230 def add_thread_suffix_request_packets(self):
231 self.test_sequence.add_log_lines(
232 ["read packet: $QThreadSuffixSupported#00",
233 "send packet: $OK#00",
234 ], True)
235
236 def add_process_info_collection_packets(self):
237 self.test_sequence.add_log_lines(
238 ["read packet: $qProcessInfo#00",
Todd Fiala8aae4f42014-06-13 23:34:17 +0000239 { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
Todd Fialae50b2e42014-06-13 19:11:33 +0000240 True)
241
242 _KNOWN_PROCESS_INFO_KEYS = [
243 "pid",
244 "parent-pid",
245 "real-uid",
246 "real-gid",
247 "effective-uid",
248 "effective-gid",
249 "cputype",
250 "cpusubtype",
251 "ostype",
252 "vendor",
253 "endian",
254 "ptrsize"
255 ]
256
257 def parse_process_info_response(self, context):
258 # Ensure we have a process info response.
259 self.assertIsNotNone(context)
260 process_info_raw = context.get("process_info_raw")
261 self.assertIsNotNone(process_info_raw)
262
263 # Pull out key:value; pairs.
264 process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }
265
266 # Validate keys are known.
267 for (key, val) in process_info_dict.items():
268 self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
269 self.assertIsNotNone(val)
270
271 return process_info_dict
272
273 def add_register_info_collection_packets(self):
274 self.test_sequence.add_log_lines(
275 [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
276 "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
277 "save_key":"reg_info_responses" } ],
278 True)
279
280 def parse_register_info_packets(self, context):
281 """Return an array of register info dictionaries, one per register info."""
282 reg_info_responses = context.get("reg_info_responses")
283 self.assertIsNotNone(reg_info_responses)
284
285 # Parse register infos.
286 return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]
287
Todd Fiala50a211b2014-06-14 22:00:36 +0000288 def expect_gdbremote_sequence(self, timeout_seconds=None):
Todd Fiala8aae4f42014-06-13 23:34:17 +0000289 if not timeout_seconds:
290 timeout_seconds = self._TIMEOUT_SECONDS
291 return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)
Todd Fialae50b2e42014-06-13 19:11:33 +0000292
293 _KNOWN_REGINFO_KEYS = [
294 "name",
295 "alt-name",
296 "bitsize",
297 "offset",
298 "encoding",
299 "format",
300 "set",
301 "gcc",
302 "dwarf",
303 "generic",
304 "container-regs",
305 "invalidate-regs"
306 ]
307
308 def assert_valid_reg_info(self, reg_info):
309 # Assert we know about all the reginfo keys parsed.
310 for key in reg_info:
311 self.assertTrue(key in self._KNOWN_REGINFO_KEYS)
312
313 # Check the bare-minimum expected set of register info keys.
314 self.assertTrue("name" in reg_info)
315 self.assertTrue("bitsize" in reg_info)
316 self.assertTrue("offset" in reg_info)
317 self.assertTrue("encoding" in reg_info)
318 self.assertTrue("format" in reg_info)
319
320 def find_pc_reg_info(self, reg_infos):
321 lldb_reg_index = 0
322 for reg_info in reg_infos:
323 if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
324 return (lldb_reg_index, reg_info)
325 lldb_reg_index += 1
326
327 return (None, None)
328
329 def add_lldb_register_index(self, reg_infos):
330 """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry.
331
332 We'll use this when we want to call packets like P/p with a register index but do so
333 on only a subset of the full register info set.
334 """
335 self.assertIsNotNone(reg_infos)
336
337 reg_index = 0
338 for reg_info in reg_infos:
339 reg_info["lldb_register_index"] = reg_index
340 reg_index += 1
341
342 def add_query_memory_region_packets(self, address):
343 self.test_sequence.add_log_lines(
344 ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
345 {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
346 True)
347
Todd Fialac30281a2014-06-14 03:03:23 +0000348 def parse_key_val_dict(self, key_val_text, allow_dupes=True):
Todd Fialae50b2e42014-06-13 19:11:33 +0000349 self.assertIsNotNone(key_val_text)
350 kv_dict = {}
351 for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
Todd Fialac30281a2014-06-14 03:03:23 +0000352 key = match.group(1)
353 val = match.group(2)
354 if key in kv_dict:
355 if allow_dupes:
356 if type(kv_dict[key]) == list:
357 kv_dict[key].append(val)
358 else:
359 # Promote to list
360 kv_dict[key] = [kv_dict[key], val]
361 else:
362 self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
363 else:
364 kv_dict[key] = val
Todd Fialae50b2e42014-06-13 19:11:33 +0000365 return kv_dict
366
367 def parse_memory_region_packet(self, context):
368 # Ensure we have a context.
369 self.assertIsNotNone(context.get("memory_region_response"))
370
371 # Pull out key:value; pairs.
372 mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))
373
374 # Validate keys are known.
375 for (key, val) in mem_region_dict.items():
376 self.assertTrue(key in ["start", "size", "permissions", "error"])
377 self.assertIsNotNone(val)
378
379 # Return the dictionary of key-value pairs for the memory region.
380 return mem_region_dict
381
382 def assert_address_within_memory_region(self, test_address, mem_region_dict):
383 self.assertIsNotNone(mem_region_dict)
384 self.assertTrue("start" in mem_region_dict)
385 self.assertTrue("size" in mem_region_dict)
386
387 range_start = int(mem_region_dict["start"], 16)
388 range_size = int(mem_region_dict["size"], 16)
389 range_end = range_start + range_size
390
391 if test_address < range_start:
392 self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
393 elif test_address >= range_end:
394 self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
395
396 def add_threadinfo_collection_packets(self):
397 self.test_sequence.add_log_lines(
398 [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
399 "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
400 "save_key":"threadinfo_responses" } ],
401 True)
402
403 def parse_threadinfo_packets(self, context):
404 """Return an array of thread ids (decimal ints), one per thread."""
405 threadinfo_responses = context.get("threadinfo_responses")
406 self.assertIsNotNone(threadinfo_responses)
407
408 thread_ids = []
409 for threadinfo_response in threadinfo_responses:
410 new_thread_infos = parse_threadinfo_response(threadinfo_response)
411 thread_ids.extend(new_thread_infos)
412 return thread_ids
413
414 def wait_for_thread_count(self, thread_count, timeout_seconds=3):
415 start_time = time.time()
416 timeout_time = start_time + timeout_seconds
417
418 actual_thread_count = 0
419 while actual_thread_count < thread_count:
420 self.reset_test_sequence()
421 self.add_threadinfo_collection_packets()
422
423 context = self.expect_gdbremote_sequence()
424 self.assertIsNotNone(context)
425
426 threads = self.parse_threadinfo_packets(context)
427 self.assertIsNotNone(threads)
428
429 actual_thread_count = len(threads)
430
431 if time.time() > timeout_time:
432 raise Exception(
433 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format(
434 timeout_seconds, thread_count, actual_thread_count))
435
436 return threads
437
438 def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
439 self.test_sequence.add_log_lines(
440 [# Set the breakpoint.
441 "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
442 # Verify the stub could set it.
443 "send packet: $OK#00",
444 ], True)
445
446 if (do_continue):
447 self.test_sequence.add_log_lines(
448 [# Continue the inferior.
449 "read packet: $c#00",
450 # Expect a breakpoint stop report.
451 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
452 ], True)
453
454 def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
455 self.test_sequence.add_log_lines(
456 [# Remove the breakpoint.
457 "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
458 # Verify the stub could unset it.
459 "send packet: $OK#00",
460 ], True)
461
    def add_qSupported_packets(self):
        # Expect the qSupported feature negotiation and capture the raw reply.
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
             ], True)

    # Stub features we know about; anything else in a qSupported reply is
    # treated as an error by parse_qSupported_response().
    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]
478
    def parse_qSupported_response(self, context):
        """Parse a captured qSupported reply into {feature: value-or-flag}.

        Raises Exception for malformed entries and for features not listed in
        _KNOWN_QSUPPORTED_STUB_FEATURES.
        """
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                # Singular feature: must end in exactly one of +, -, ?.
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict
508
509 def run_process_then_stop(self, run_seconds=1):
510 # Tell the stub to continue.
511 self.test_sequence.add_log_lines(
512 ["read packet: $vCont;c#00"],
513 True)
514 context = self.expect_gdbremote_sequence()
515
516 # Wait for run_seconds.
517 time.sleep(run_seconds)
518
519 # Send an interrupt, capture a T response.
520 self.reset_test_sequence()
521 self.test_sequence.add_log_lines(
522 ["read packet: {}".format(chr(03)),
523 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
524 True)
525 context = self.expect_gdbremote_sequence()
526 self.assertIsNotNone(context)
527 self.assertIsNotNone(context.get("stop_result"))
528
529 return context
530
531 def select_modifiable_register(self, reg_infos):
532 """Find a register that can be read/written freely."""
533 PREFERRED_REGISTER_NAMES = sets.Set(["rax",])
534
535 # First check for the first register from the preferred register name set.
536 alternative_register_index = None
537
538 self.assertIsNotNone(reg_infos)
539 for reg_info in reg_infos:
540 if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
541 # We found a preferred register. Use it.
542 return reg_info["lldb_register_index"]
543 if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
544 # A frame pointer register will do as a register to modify temporarily.
545 alternative_register_index = reg_info["lldb_register_index"]
546
547 # We didn't find a preferred register. Return whatever alternative register
548 # we found, if any.
549 return alternative_register_index
550
551 def extract_registers_from_stop_notification(self, stop_key_vals_text):
552 self.assertIsNotNone(stop_key_vals_text)
553 kv_dict = self.parse_key_val_dict(stop_key_vals_text)
554
555 registers = {}
556 for (key, val) in kv_dict.items():
557 if re.match(r"^[0-9a-fA-F]+", key):
558 registers[int(key, 16)] = val
559 return registers
560
561 def gather_register_infos(self):
562 self.reset_test_sequence()
563 self.add_register_info_collection_packets()
564
565 context = self.expect_gdbremote_sequence()
566 self.assertIsNotNone(context)
567
568 reg_infos = self.parse_register_info_packets(context)
569 self.assertIsNotNone(reg_infos)
570 self.add_lldb_register_index(reg_infos)
571
572 return reg_infos
573
574 def find_generic_register_with_name(self, reg_infos, generic_name):
575 self.assertIsNotNone(reg_infos)
576 for reg_info in reg_infos:
577 if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
578 return reg_info
579 return None
580
    def decode_gdbremote_binary(self, encoded_bytes):
        """Decode a gdb-remote binary payload string.

        Handles the protocol's two escape forms: "}" followed by a byte that
        must be XORed with 0x20, and "*" run-length encoding where the byte
        after "*" encodes (repeat_count + 29) copies of the previous
        decoded character.
        """
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i +=2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                # Ordinary character: copy through unchanged.
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes
601
    def build_auxv_dict(self, endian, word_size, auxv_data):
        """Convert raw auxv binary data into a {key_int: value_int} dict.

        auxv_data is consumed pairwise (one key word, one value word), each
        decoded from the target's endianness. A (0, 0) pair terminates the
        vector; running out of data without seeing one fails the test.
        """
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop of value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict
Todd Fiala51886732014-06-17 22:01:27 +0000633
    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                # 'm' replies carry more data to come; 'l' marks the last
                # (possibly empty) chunk. DOTALL because the payload is binary.
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done.  We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data
Todd Fiala4c24eba2014-06-19 17:35:40 +0000667
668 def add_interrupt_packets(self):
669 self.test_sequence.add_log_lines([
670 # Send the intterupt.
671 "read packet: {}".format(chr(03)),
672 # And wait for the stop notification.
673 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
674 ], True)
675
676 def parse_interrupt_packets(self, context):
677 self.assertIsNotNone(context.get("stop_signo"))
678 self.assertIsNotNone(context.get("stop_key_val_text"))
Todd Fiala9846d452014-06-20 17:39:24 +0000679 return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))
680
681 def add_QSaveRegisterState_packets(self, thread_id):
682 if thread_id:
683 # Use the thread suffix form.
684 request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
685 else:
686 request = "read packet: $QSaveRegisterState#00"
687
688 self.test_sequence.add_log_lines([
689 request,
690 {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
691 ], True)
692
693 def parse_QSaveRegisterState_response(self, context):
694 self.assertIsNotNone(context)
695
696 save_response = context.get("save_response")
697 self.assertIsNotNone(save_response)
698
699 if len(save_response) < 1 or save_response[0] == "E":
700 # error received
701 return (False, None)
702 else:
703 return (True, int(save_response))
704
705 def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
706 if thread_id:
707 # Use the thread suffix form.
708 request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
709 else:
710 request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)
711
712 self.test_sequence.add_log_lines([
713 request,
714 "send packet: $OK#00"
715 ], True)
716
    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        """For each register: read it (p), write back its bitwise complement
        (P), and verify with a re-read.

        Returns (successful_writes, failed_writes). Some registers (flags,
        segment selectors, masks) legitimately reject or permute writes, so
        callers should tolerate a non-zero failed count.
        """
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info.  We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded.  There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions.  Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written.  Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes +=1

        return (successful_writes, failed_writes)
804
805 def is_bit_flippable_register(self, reg_info):
806 if not reg_info:
807 return False
808 if not "set" in reg_info:
809 return False
810 if reg_info["set"] != "General Purpose Registers":
811 return False
812 if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
813 # Don't try to bit flip registers contained in another register.
814 return False
815 if re.match("^.s$", reg_info["name"]):
816 # This is a 2-letter register name that ends in "s", like a segment register.
817 # Don't try to bit flip these.
818 return False
819 # Okay, this looks fine-enough.
820 return True
821
    def read_register_values(self, reg_infos, endian, thread_id=None):
        """Read every register in reg_infos with the p packet.

        Returns {lldb_register_index: integer_value}, converting each reply
        from the target's endianness. Uses the thread-suffix form of p when
        thread_id is given. Fails the test on an error ('E...') reply.
        """
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values