"""
Base class for gdb-remote test cases.
"""

import errno
import unittest2
import pexpect
import platform
import re
import sets
import signal
import socket
import subprocess
import sys
import time
from lldbtest import *
from lldbgdbserverutils import *
import logging
import os.path

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    port = 12345

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers used for common exceptions; these are not target-specific.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()

        # Uncomment this code to force only a single test to run (by name).
        #if not re.search(r"P_", self._testMethodName):
        #    self.skipTest("focusing on one test")

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def init_llgs_test(self):
        self.debug_monitor_exe = get_lldb_gdbserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb_gdbserver exe not found")
        self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id())

    def init_debugserver_test(self):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)

    def create_socket(self):
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # Send the kill packet so lldb-gdbserver shuts down gracefully.
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        sock.connect(('localhost', self.port))
        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def launch_debug_monitor(self, attach_pid=None):
        # Create the command line.
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid

        # Start the server.
        server = pexpect.spawn(commandline)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
        server = self.launch_debug_monitor(attach_pid=attach_pid)

        # Wait until we receive the server ready message before continuing.
        server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))

        # Schedule the debug monitor to be shut down during teardown.
        logger = self.logger
        def shutdown_debug_monitor():
            try:
                server.close()
            except:
                logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
        self.addTearDownHook(shutdown_debug_monitor)

        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            # Create a socket to talk to the server.
            try:
                self.sock = self.create_socket()
                return server
            except socket.error as serr:
                # We're only trying to handle connection refused.
                if serr.errno != errno.ECONNREFUSED:
                    raise serr

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a second before the next attempt.
            time.sleep(1)

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while. We'll
        # have it sleep.
        exe_path = os.path.abspath("a.out")

        args = [exe_path]
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        return subprocess.Popen(args)

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach-to-pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream. The packet stream is not yet executed; it is ready to have more
        expected packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
        to the expected socket stream. The packet stream is not yet executed; it is ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build the launch args.
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream.
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

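    # Illustrative usage sketch only (not called by any test): a derived test case
    # typically preps the monitor and inferior, appends more expected packets to
    # self.test_sequence, then runs the whole exchange.  The choice of
    # add_process_info_collection_packets() here is just an example of "more packets".
    def _example_prep_and_run_sketch(self):
        procs = self.prep_debug_monitor_and_inferior()
        self.assertIsNotNone(procs["server"])

        # Append further expected packets, then execute the stream against the stub.
        self.add_process_info_collection_packets()
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
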
    def add_no_ack_remote_stream(self):
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

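    # Illustrative sketch (not used by any test): feeds a hand-written qProcessInfo
    # payload through parse_process_info_response().  The field values are made up;
    # numeric fields such as pid come back as hex text and still need int(val, 16).
    def _example_parse_process_info_sketch(self):
        sample = "pid:1a2b;parent-pid:1;ostype:linux;endian:little;ptrsize:8;"
        info = self.parse_process_info_response({"process_info_raw": sample})
        self.assertEquals(info["ostype"], "linux")
        self.assertEquals(int(info["pid"], 16), 0x1a2b)
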
    def add_register_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
        ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
        """Add a "lldb_register_index" key containing the 0-based index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list.
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

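    # Illustrative sketch (not used by any test): with allow_dupes=True a repeated
    # key is promoted to a list of values.  The keys and values here are made up.
    def _example_parse_key_val_dict_sketch(self):
        parsed = self.parse_key_val_dict("name:rax;value:10;value:11;")
        self.assertEquals(parsed["name"], "rax")
        self.assertEquals(parsed["value"], ["10", "11"])
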
    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

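    # Illustrative sketch (not used by any test): a qMemoryRegionInfo reply is the same
    # key:value; text handled above, e.g. "start:400000;size:1000;permissions:rx;".
    # The address and region values here are made up.
    def _example_parse_memory_region_sketch(self):
        region = self.parse_memory_region_packet(
            {"memory_region_response": "start:400000;size:1000;permissions:rx;"})
        self.assert_address_within_memory_region(0x400500, region)
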
    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
            ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
        ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is.
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element.
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

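    # Illustrative sketch (not used by any test): a reply such as
    # "PacketSize=20000;QStartNoAckMode+;qXfer:auxv:read+" parses to
    # {"PacketSize": "20000", "QStartNoAckMode": "+", "qXfer:auxv:read": "+"}.
    def _example_parse_qSupported_sketch(self):
        supported = self.parse_qSupported_response(
            {"qSupported_response": "PacketSize=20000;QStartNoAckMode+;qXfer:auxv:read+"})
        self.assertEquals(supported["PacketSize"], "20000")
        self.assertEquals(supported["QStartNoAckMode"], "+")
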
    def run_process_then_stop(self, run_seconds=1):
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#00"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register. Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register. Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+", key):
                registers[int(key, 16)] = val
        return registers

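    # Illustrative sketch (not used by any test): expedited registers in a stop reply
    # arrive as hex-regnum:hex-value pairs mixed in with other stop keys.  The values
    # below are made up.
    def _example_extract_registers_sketch(self):
        registers = self.extract_registers_from_stop_notification(
            "thread:1f03;00:0000000000400000;07:00007fffffffe000;")
        self.assertEquals(registers[0], "0000000000400000")
        self.assertEquals(registers[7], "00007fffffffe000")
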
    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

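    # Illustrative sketch (not used by any test): "}" escapes the following byte
    # (xor 0x20), and "*" run-length-expands the previous character, with the extra
    # repeat count encoded as chr(count + 29).
    def _example_decode_gdbremote_binary_sketch(self):
        # 0x5d ^ 0x20 == 0x7d, i.e. "}".
        self.assertEquals(self.decode_gdbremote_binary("}]"), "}")
        # ord(" ") - 29 == 3, so "0" is repeated three more times.
        self.assertEquals(self.decode_gdbremote_binary("0* "), "0000")
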
    def build_auxv_dict(self, endian, word_size, auxv_data):
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off the key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off the value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle the ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

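    # Illustrative sketch (not used by any test): auxv data is a flat array of
    # (key, value) machine words ending with a zero key.  Assuming a little-endian
    # target with 8-byte words, AT_PAGESZ (6) = 0x1000 plus the terminator decodes to {6: 4096}.
    def _example_build_auxv_dict_sketch(self):
        auxv_data = ("\x06\x00\x00\x00\x00\x00\x00\x00"
                     "\x00\x10\x00\x00\x00\x00\x00\x00"
                     "\x00\x00\x00\x00\x00\x00\x00\x00"
                     "\x00\x00\x00\x00\x00\x00\x00\x00")
        self.assertEquals(self.build_auxv_dict("little", 8, auxv_data), {6: 4096})
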
    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} responses until an 'l' response (with or without data) is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move the offset along.
            offset += chunk_length

            # Figure out if we're done. We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

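    # Illustrative usage sketch (not called by any test): within a test that has already
    # set up the no-ack stream, a binary blob such as auxv can be pulled down in
    # fixed-size chunks.  The qXfer prefix and chunk size here are assumptions for illustration.
    def _example_read_auxv_in_chunks_sketch(self):
        auxv_raw = self.read_binary_data_in_chunks("qXfer:auxv:read::", 0x40)
        self.assertIsNotNone(auxv_raw)
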
    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
            "read packet: {}".format(chr(03)),
            # And wait for the stop notification.
            {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
            ], True)

    def parse_interrupt_packets(self, context):
        self.assertIsNotNone(context.get("stop_signo"))
        self.assertIsNotNone(context.get("stop_key_val_text"))
        return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))

    def add_QSaveRegisterState_packets(self, thread_id):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)

    def parse_QSaveRegisterState_response(self, context):
        self.assertIsNotNone(context)

        save_response = context.get("save_response")
        self.assertIsNotNone(save_response)

        if len(save_response) < 1 or save_response[0] == "E":
            # An error was received.
            return (False, None)
        else:
            return (True, int(save_response))

    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)

    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info. We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle the thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s.
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle the thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when loading reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle the thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert the value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#00",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle the case of no vCont support at all - the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

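    # Illustrative sketch (not used by any test): a stub reply of "$vCont;c;C;s;S#xx"
    # is captured as ";c;C;s;S" and parses to a dict keyed by the supported actions.
    def _example_parse_vCont_sketch(self):
        supported = self.parse_vCont_query_response({"vCont_query_response": ";c;C;s;S"})
        self.assertTrue("c" in supported)
        self.assertTrue("s" in supported)
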
    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                     ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true. If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process.
        self.test_sequence.add_log_lines(
            [# Start running after the initial stop.
             "read packet: $c#00",
             # Match the output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
             { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
               "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
             # Now stop the inferior.
             "read packet: {}".format(chr(03)),
             # And wait for the stop notification.
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Grab the main thread id.
        self.assertIsNotNone(context.get("stop_thread_id"))
        main_thread_id = int(context.get("stop_thread_id"), 16)

        # Grab the function address.
        self.assertIsNotNone(context.get("function_address"))
        function_address = int(context.get("function_address"), 16)

        # Grab the data addresses.
        self.assertIsNotNone(context.get("g_c1_address"))
        g_c1_address = int(context.get("g_c1_address"), 16)

        self.assertIsNotNone(context.get("g_c2_address"))
        g_c2_address = int(context.get("g_c2_address"), 16)

        # Set a breakpoint at the given address.
        # Note this might need to be switched per platform (ARM, mips, etc.).
        BREAKPOINT_KIND = 1
        self.reset_test_sequence()
        self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Remove the breakpoint.
        self.reset_test_sequence()
        self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify g_c1 and g_c2 match the expected initial state.
        args = {}
        args["g_c1_address"] = g_c1_address
        args["g_c2_address"] = g_c2_address
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"

        self.assertTrue(self.g_c1_c2_contents_are(args))

        # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)

        # Verify we hit the next state.
        args["expected_g_c1"] = "1"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "0"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)

        # Verify we hit the next state.
        args["expected_g_c1"] = "0"
        args["expected_g_c2"] = "1"
        (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
        self.assertTrue(state_reached)
        self.assertEquals(step_count, 1)