"""
Base class for gdb-remote test cases.
"""

import errno
import unittest2
import pexpect
import platform
import random
import re
import sets
import signal
import socket
import subprocess
import sys
import time
from lldbtest import *
from lldbgdbserverutils import *
import logging
import os.path

class GdbRemoteTestCaseBase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    _TIMEOUT_SECONDS = 5

    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB signal numbers that are not target-specific but are used for common exceptions.
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96

    def setUp(self):
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()
        self.port = self.get_next_port()
        self.named_pipe_path = None

    def get_next_port(self):
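        """Return a random port in the range [12000, 15999] to reduce collisions between concurrent test runs."""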
        return 12000 + random.randint(0,3999)

    def reset_test_sequence(self):
        self.test_sequence = GdbRemoteTestSequence(self.logger)

    def init_llgs_test(self):
        self.debug_monitor_exe = get_lldb_gdbserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb_gdbserver exe not found")
        self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id())

    def init_debugserver_test(self):
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)

    def create_socket(self):
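        """Create a socket connected to the debug monitor on self.port, and add a teardown hook that sends the kill packet and closes the socket."""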
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-gdbserver shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

            try:
                sock.close()
            except:
                logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        sock.connect(('localhost', self.port))
        return sock

    def set_inferior_startup_launch(self):
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        self._inferior_startup = self._STARTUP_ATTACH

    def launch_debug_monitor(self, attach_pid=None):
        # Create the command line.
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid
        if self.named_pipe_path:
            commandline += " --named-pipe %s" % self.named_pipe_path

        # Start the server.
        server = pexpect.spawn(commandline)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server

    def connect_to_debug_monitor(self, attach_pid=None):
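        """Launch the debug monitor and connect a socket to it, retrying on a new random port (up to MAX_ATTEMPTS times) if the connection is refused."""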
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Wait until we receive the server ready message before continuing.
            port_good = True
            try:
                server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))
            except:
                port_good = False
                server.close()

            if port_good:
                # Schedule debug monitor to be shut down during teardown.
                logger = self.logger
                def shutdown_debug_monitor():
                    try:
                        server.close()
                    except:
                        logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
                self.addTearDownHook(shutdown_debug_monitor)

                # Create a socket to talk to the server
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    # We should close the server here to be safe.
                    server.close()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before next attempt, to avoid collisions.
            time.sleep(random.randint(1,5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)

    def launch_process_for_attach(self, inferior_args=None, sleep_seconds=3):
        # We're going to start a child process that the debug monitor stub can later attach to.
        # This process needs to be started so that it just hangs around for a while. We'll
        # have it sleep.
        exe_path = os.path.abspath("a.out")

        args = [exe_path]
        if inferior_args:
            args.extend(inferior_args)
        if sleep_seconds:
            args.append("sleep:%d" % sleep_seconds)

        return subprocess.Popen(args)

    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach-to-pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream. The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
        to the expected socket stream. The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}

    def add_no_ack_remote_stream(self):
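        """Add the QStartNoAckMode handshake to the expected packet stream so that subsequent packets are not acknowledged with +/-."""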
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)

    def add_verified_launch_packets(self, launch_args):
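        """Add the launch-arguments (A) packet built from launch_args, plus the qLaunchSuccess query, to the expected packet stream."""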
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)

    def add_thread_suffix_request_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
            ], True)

    def add_process_info_collection_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)

    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
    ]

    def parse_process_info_response(self, context):
        # Ensure we have a process info response.
        self.assertIsNotNone(context)
        process_info_raw = context.get("process_info_raw")
        self.assertIsNotNone(process_info_raw)

        # Pull out key:value; pairs.
        process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }

        # Validate keys are known.
        for (key, val) in process_info_dict.items():
            self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
            self.assertIsNotNone(val)

        return process_info_dict

    def add_register_info_collection_packets(self):
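        """Add iterated qRegisterInfo queries to the expected packet stream, saving the raw responses under the key "reg_info_responses"."""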
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
                "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
                "save_key":"reg_info_responses" } ],
            True)

    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]

    def expect_gdbremote_sequence(self, timeout_seconds=None):
        if not timeout_seconds:
            timeout_seconds = self._TIMEOUT_SECONDS
        return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)

    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]

    def assert_valid_reg_info(self, reg_info):
        # Assert we know about all the reginfo keys parsed.
        for key in reg_info:
            self.assertTrue(key in self._KNOWN_REGINFO_KEYS)

        # Check the bare-minimum expected set of register info keys.
        self.assertTrue("name" in reg_info)
        self.assertTrue("bitsize" in reg_info)
        self.assertTrue("offset" in reg_info)
        self.assertTrue("encoding" in reg_info)
        self.assertTrue("format" in reg_info)

    def find_pc_reg_info(self, reg_infos):
        lldb_reg_index = 0
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
                return (lldb_reg_index, reg_info)
            lldb_reg_index += 1

        return (None, None)

    def add_lldb_register_index(self, reg_infos):
344 """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry.

        We'll use this when we want to call packets like P/p with a register index but do so
        on only a subset of the full register info set.
        """
        self.assertIsNotNone(reg_infos)

        reg_index = 0
        for reg_info in reg_infos:
            reg_info["lldb_register_index"] = reg_index
            reg_index += 1

    def add_query_memory_region_packets(self, address):
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)

    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
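        """Parse semicolon-separated key:value; pairs into a dict; when allow_dupes is True, repeated keys are collected into a list."""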
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict

    def parse_memory_region_packet(self, context):
        # Ensure we have a context.
        self.assertIsNotNone(context.get("memory_region_response"))

        # Pull out key:value; pairs.
        mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))

        # Validate keys are known.
        for (key, val) in mem_region_dict.items():
            self.assertTrue(key in ["start", "size", "permissions", "error"])
            self.assertIsNotNone(val)

        # Return the dictionary of key-value pairs for the memory region.
        return mem_region_dict

    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))

    def add_threadinfo_collection_packets(self):
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)

    def parse_threadinfo_packets(self, context):
        """Return an array of thread ids (decimal ints), one per thread."""
        threadinfo_responses = context.get("threadinfo_responses")
        self.assertIsNotNone(threadinfo_responses)

        thread_ids = []
        for threadinfo_response in threadinfo_responses:
            new_thread_infos = parse_threadinfo_response(threadinfo_response)
            thread_ids.extend(new_thread_infos)
        return thread_ids

    def wait_for_thread_count(self, thread_count, timeout_seconds=3):
        start_time = time.time()
        timeout_time = start_time + timeout_seconds

        actual_thread_count = 0
        while actual_thread_count < thread_count:
            self.reset_test_sequence()
            self.add_threadinfo_collection_packets()

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)

            actual_thread_count = len(threads)

            if time.time() > timeout_time:
                raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
                        timeout_seconds, thread_count, actual_thread_count))

        return threads

    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
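        """Add a Z0 (software breakpoint) packet for the given address, and optionally a continue plus the expected stop report, to the packet stream."""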
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
            ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)

    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
            ], True)

    def add_qSupported_packets(self):
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)

    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]

    def parse_qSupported_response(self, context):
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and value are set as expected. For feature+, feature-, and feature?,
        # the +, -, or ? is stripped from the key and stored as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict

    def run_process_then_stop(self, run_seconds=1):
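        """Continue the inferior, let it run for run_seconds, then send an interrupt and capture the resulting stop reply."""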
        # Tell the stub to continue.
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#00"],
            True)
        context = self.expect_gdbremote_sequence()

        # Wait for run_seconds.
        time.sleep(run_seconds)

        # Send an interrupt, capture a T response.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: {}".format(chr(03)),
             {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        self.assertIsNotNone(context.get("stop_result"))

        return context

    def select_modifiable_register(self, reg_infos):
        """Find a register that can be read/written freely."""
        PREFERRED_REGISTER_NAMES = sets.Set(["rax",])

        # First check for the first register from the preferred register name set.
        alternative_register_index = None

        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
                # We found a preferred register. Use it.
                return reg_info["lldb_register_index"]
            if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
                # A frame pointer register will do as a register to modify temporarily.
                alternative_register_index = reg_info["lldb_register_index"]

        # We didn't find a preferred register. Return whatever alternative register
        # we found, if any.
        return alternative_register_index

    def extract_registers_from_stop_notification(self, stop_key_vals_text):
        self.assertIsNotNone(stop_key_vals_text)
        kv_dict = self.parse_key_val_dict(stop_key_vals_text)

        registers = {}
        for (key, val) in kv_dict.items():
            if re.match(r"^[0-9a-fA-F]+", key):
                registers[int(key, 16)] = val
        return registers

    def gather_register_infos(self):
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos

    def find_generic_register_with_name(self, reg_infos, generic_name):
        self.assertIsNotNone(reg_infos)
        for reg_info in reg_infos:
            if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
                return reg_info
        return None

    def decode_gdbremote_binary(self, encoded_bytes):
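        """Decode a gdb-remote binary payload: '}' escapes the next byte (XOR 0x20) and '*' run-length-expands the previous byte (repeat count = next byte - 29)."""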
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i += 2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes

    def build_auxv_dict(self, endian, word_size, auxv_data):
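        """Convert raw auxv bytes into a {key: value} dict of word_size-wide entries in the given endianness, stopping at the terminating zero key."""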
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop off value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict

    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
649 """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done. We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data

    def add_interrupt_packets(self):
        self.test_sequence.add_log_lines([
            # Send the interrupt.
685 "read packet: {}".format(chr(03)),
686 # And wait for the stop notification.
687 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
688 ], True)
689
690 def parse_interrupt_packets(self, context):
691 self.assertIsNotNone(context.get("stop_signo"))
692 self.assertIsNotNone(context.get("stop_key_val_text"))
Todd Fiala9846d452014-06-20 17:39:24 +0000693 return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))
694
695 def add_QSaveRegisterState_packets(self, thread_id):
696 if thread_id:
697 # Use the thread suffix form.
698 request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
699 else:
700 request = "read packet: $QSaveRegisterState#00"
701
702 self.test_sequence.add_log_lines([
703 request,
704 {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
705 ], True)
706
707 def parse_QSaveRegisterState_response(self, context):
708 self.assertIsNotNone(context)
709
710 save_response = context.get("save_response")
711 self.assertIsNotNone(save_response)
712
713 if len(save_response) < 1 or save_response[0] == "E":
714 # error received
715 return (False, None)
716 else:
717 return (True, int(save_response))
718
719 def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
720 if thread_id:
721 # Use the thread suffix form.
722 request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
723 else:
724 request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)
725
726 self.test_sequence.add_log_lines([
727 request,
728 "send packet: $OK#00"
729 ], True)
730
731 def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
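        """For each register info, read the value with p, write back its bitwise complement with P, verify the readback, and return (successful_writes, failed_writes)."""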
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info. We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes += 1

        return (successful_writes, failed_writes)

    def is_bit_flippable_register(self, reg_info):
        if not reg_info:
            return False
        if not "set" in reg_info:
            return False
        if reg_info["set"] != "General Purpose Registers":
            return False
        if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
            # Don't try to bit flip registers contained in another register.
            return False
        if re.match("^.s$", reg_info["name"]):
            # This is a 2-letter register name that ends in "s", like a segment register.
            # Don't try to bit flip these.
            return False
        # Okay, this looks fine-enough.
        return True

    def read_register_values(self, reg_infos, endian, thread_id=None):
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when we load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values

    def add_vCont_query_packets(self):
        self.test_sequence.add_log_lines([
            "read packet: $vCont?#00",
            {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
            ], True)

    def parse_vCont_query_response(self, context):
        self.assertIsNotNone(context)
        vCont_query_response = context.get("vCont_query_response")

        # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
        if not vCont_query_response or len(vCont_query_response) == 0:
            return {}

        return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}

    def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        single_step_count = 0

        while single_step_count < max_step_count:
            self.assertIsNotNone(thread_id)

            # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id.
            step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
            # print "\nstep_packet created: {}\n".format(step_packet)

            # Single step.
            self.reset_test_sequence()
            if use_Hc_packet:
                self.test_sequence.add_log_lines(
                    [# Set the continue thread.
                     "read packet: $Hc{0:x}#00".format(thread_id),
                     "send packet: $OK#00",
                    ], True)
            self.test_sequence.add_log_lines([
                # Single step.
                step_packet,
                # "read packet: $vCont;s:{0:x}#00".format(thread_id),
                # Expect a breakpoint stop report.
                {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)
            self.assertIsNotNone(context.get("stop_signo"))
            self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)

            single_step_count += 1

            # See if the predicate is true. If so, we're done.
            if predicate(args):
                return (True, single_step_count)

        # The predicate didn't return true within the runaway step count.
        return (False, single_step_count)

    def g_c1_c2_contents_are(self, args):
        """Used by single step test that appears in a few different contexts."""
        g_c1_address = args["g_c1_address"]
        g_c2_address = args["g_c2_address"]
        expected_g_c1 = args["expected_g_c1"]
        expected_g_c2 = args["expected_g_c2"]

        # Read g_c1 and g_c2 contents.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
             "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
            True)

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Check if what we read from inferior memory is what we are expecting.
        self.assertIsNotNone(context.get("g_c1_contents"))
        self.assertIsNotNone(context.get("g_c2_contents"))

        return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)

    def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
        """Used by single step test that appears in a few different contexts."""
        # Start up the inferior.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])

        # Run the process
        self.test_sequence.add_log_lines(
            [# Start running after initial stop.
             "read packet: $c#00",
             # Match output line that prints the memory address of the function call entry point.
             # Note we require launch-only testing so we can get inferior output.
964 { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
965 "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
966 # Now stop the inferior.
967 "read packet: {}".format(chr(03)),
968 # And wait for the stop notification.
969 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
970 True)
971
972 # Run the packet stream.
973 context = self.expect_gdbremote_sequence()
974 self.assertIsNotNone(context)
975
976 # Grab the main thread id.
977 self.assertIsNotNone(context.get("stop_thread_id"))
978 main_thread_id = int(context.get("stop_thread_id"), 16)
979
980 # Grab the function address.
981 self.assertIsNotNone(context.get("function_address"))
982 function_address = int(context.get("function_address"), 16)
983
984 # Grab the data addresses.
985 self.assertIsNotNone(context.get("g_c1_address"))
986 g_c1_address = int(context.get("g_c1_address"), 16)
987
988 self.assertIsNotNone(context.get("g_c2_address"))
989 g_c2_address = int(context.get("g_c2_address"), 16)
990
991 # Set a breakpoint at the given address.
992 # Note this might need to be switched per platform (ARM, mips, etc.).
993 BREAKPOINT_KIND = 1
994 self.reset_test_sequence()
995 self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
996 context = self.expect_gdbremote_sequence()
997 self.assertIsNotNone(context)
998
999 # Remove the breakpoint.
1000 self.reset_test_sequence()
1001 self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
1002 context = self.expect_gdbremote_sequence()
1003 self.assertIsNotNone(context)
1004
1005 # Verify g_c1 and g_c2 match expected initial state.
1006 args = {}
1007 args["g_c1_address"] = g_c1_address
1008 args["g_c2_address"] = g_c2_address
1009 args["expected_g_c1"] = "0"
1010 args["expected_g_c2"] = "1"
1011
1012 self.assertTrue(self.g_c1_c2_contents_are(args))
1013
1014 # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code.
1015 args["expected_g_c1"] = "1"
1016 args["expected_g_c2"] = "1"
1017 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1018 self.assertTrue(state_reached)
1019
1020 # Verify we hit the next state.
1021 args["expected_g_c1"] = "1"
1022 args["expected_g_c2"] = "0"
1023 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1024 self.assertTrue(state_reached)
1025 self.assertEquals(step_count, 1)
1026
1027 # Verify we hit the next state.
1028 args["expected_g_c1"] = "0"
1029 args["expected_g_c2"] = "0"
1030 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1031 self.assertTrue(state_reached)
1032 self.assertEquals(step_count, 1)
1033
1034 # Verify we hit the next state.
1035 args["expected_g_c1"] = "0"
1036 args["expected_g_c2"] = "1"
1037 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1038 self.assertTrue(state_reached)
1039 self.assertEquals(step_count, 1)
Todd Fialaaf245d12014-06-30 21:05:18 +00001040