blob: b0828697760c595ead6af1de1b506cd02691bc61 [file] [log] [blame]
Todd Fialae50b2e42014-06-13 19:11:33 +00001"""
2Base class for gdb-remote test cases.
3"""
4
Todd Fiala8aae4f42014-06-13 23:34:17 +00005import errno
Todd Fialae50b2e42014-06-13 19:11:33 +00006import unittest2
7import pexpect
8import platform
9import sets
10import signal
11import socket
12import subprocess
13import sys
14import time
15from lldbtest import *
16from lldbgdbserverutils import *
17import logging
18import os.path
19
class GdbRemoteTestCaseBase(TestBase):
    # Base class for tests that drive a gdb-remote stub (debugserver or
    # lldb-gdbserver) directly over a TCP socket, using an expected
    # packet-sequence replay model.

    mydir = TestBase.compute_mydir(__file__)

    # TCP port the debug monitor listens on.
    # NOTE(review): a fixed port may collide if tests run in parallel -- confirm.
    port = 12345

    # Default timeout (seconds) used by expect_gdbremote_sequence().
    _TIMEOUT_SECONDS = 5

    # Full gdb-remote "kill" packet: payload 'k', checksum 0x6b.
    _GDBREMOTE_KILL_PACKET = "$k#6b"

    # Logging level for this test class; flip to DEBUG when diagnosing.
    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Inferior startup modes, selected via set_inferior_startup_launch() /
    # set_inferior_startup_attach().
    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB Signal numbers that are not target-specific used for common exceptions
    TARGET_EXC_BAD_ACCESS = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC = 0x93
    TARGET_EXC_EMULATION = 0x94
    TARGET_EXC_SOFTWARE = 0x95
    TARGET_EXC_BREAKPOINT = 0x96
    def setUp(self):
        """Set up per-test logging and a fresh expected-packet sequence.

        Defaults the inferior startup mode to launch (see
        set_inferior_startup_launch / set_inferior_startup_attach).
        """
        TestBase.setUp(self)
        FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
        logging.basicConfig(format=FORMAT)
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(self._LOGGING_LEVEL)
        self.test_sequence = GdbRemoteTestSequence(self.logger)
        self.set_inferior_startup_launch()

        # Uncomment this code to force only a single test to run (by name).
        #if not re.search(r"P_", self._testMethodName):
        #    self.skipTest("focusing on one test")
    def reset_test_sequence(self):
        """Discard any accumulated expected packets and start a new sequence."""
        self.test_sequence = GdbRemoteTestSequence(self.logger)
60 def init_llgs_test(self):
61 self.debug_monitor_exe = get_lldb_gdbserver_exe()
62 if not self.debug_monitor_exe:
63 self.skipTest("lldb_gdbserver exe not found")
Todd Fiala8aae4f42014-06-13 23:34:17 +000064 self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id(), self.id())
Todd Fialae50b2e42014-06-13 19:11:33 +000065
    def init_debugserver_test(self):
        """Select debugserver as the debug monitor; skip the test if it is missing."""
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        # NOTE(review): log-flags value 0x800000 presumably enables gdb-remote
        # packet logging in debugserver -- confirm against debugserver docs.
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
72 def create_socket(self):
73 sock = socket.socket()
74 logger = self.logger
75
76 def shutdown_socket():
77 if sock:
78 try:
79 # send the kill packet so lldb-gdbserver shuts down gracefully
80 sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
81 except:
82 logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))
83
84 try:
85 sock.close()
86 except:
87 logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))
88
89 self.addTearDownHook(shutdown_socket)
90
91 sock.connect(('localhost', self.port))
92 return sock
93
    def set_inferior_startup_launch(self):
        # Subsequent prep_debug_monitor_and_inferior() calls will have the
        # stub launch the inferior itself.
        self._inferior_startup = self._STARTUP_LAUNCH

    def set_inferior_startup_attach(self):
        # Subsequent prep_debug_monitor_and_inferior() calls will start the
        # inferior first, then have the stub attach to it by pid.
        self._inferior_startup = self._STARTUP_ATTACH
    def launch_debug_monitor(self, attach_pid=None):
        """Spawn the debug monitor with pexpect and return the spawn object.

        Args:
            attach_pid: if not None, append --attach=<pid> so the stub
                attaches to that process.
        """
        # Create the command line.
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid

        # Start the server.
        server = pexpect.spawn(commandline)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server
115 def connect_to_debug_monitor(self, attach_pid=None):
116 server = self.launch_debug_monitor(attach_pid=attach_pid)
117
118 # Wait until we receive the server ready message before continuing.
119 server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))
120
Todd Fialae50b2e42014-06-13 19:11:33 +0000121 # Schedule debug monitor to be shut down during teardown.
122 logger = self.logger
123 def shutdown_debug_monitor():
124 try:
125 server.close()
126 except:
127 logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
Todd Fialae50b2e42014-06-13 19:11:33 +0000128 self.addTearDownHook(shutdown_debug_monitor)
129
Todd Fiala8aae4f42014-06-13 23:34:17 +0000130 attempts = 0
131 MAX_ATTEMPTS = 20
Todd Fialae50b2e42014-06-13 19:11:33 +0000132
Todd Fiala8aae4f42014-06-13 23:34:17 +0000133 while attempts < MAX_ATTEMPTS:
134 # Create a socket to talk to the server
135 try:
136 self.sock = self.create_socket()
137 return server
138 except socket.error as serr:
139 # We're only trying to handle connection refused
140 if serr.errno != errno.ECONNREFUSED:
141 raise serr
Todd Fialae50b2e42014-06-13 19:11:33 +0000142
Todd Fiala8aae4f42014-06-13 23:34:17 +0000143 # Increment attempts.
144 print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
145 attempts += 1
146
147 # And wait a second before next attempt.
148 time.sleep(1)
149
150 raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)
Todd Fialae50b2e42014-06-13 19:11:33 +0000151
152 def launch_process_for_attach(self,inferior_args=None, sleep_seconds=3):
153 # We're going to start a child process that the debug monitor stub can later attach to.
154 # This process needs to be started so that it just hangs around for a while. We'll
155 # have it sleep.
156 exe_path = os.path.abspath("a.out")
157
158 args = [exe_path]
159 if inferior_args:
160 args.extend(inferior_args)
161 if sleep_seconds:
162 args.append("sleep:%d" % sleep_seconds)
163
164 return subprocess.Popen(args)
165
    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach to pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream.  The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no ack mode is
        setup on the expected packet stream, then the verified launch packets are added
        to the expected socket stream.  The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior (when
        # attach_pid is set).
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args.
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream (no-ack setup happens in both modes).
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}
    def add_no_ack_remote_stream(self):
        """Append the QStartNoAckMode handshake to the expected packet stream."""
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)
    def add_verified_launch_packets(self, launch_args):
        """Append an A (launch) packet for launch_args, plus the OK/qLaunchSuccess
        verification exchange, to the expected packet stream."""
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)
    def add_thread_suffix_request_packets(self):
        """Append the QThreadSuffixSupported request/OK exchange to the stream."""
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
             ], True)
    def add_process_info_collection_packets(self):
        """Append a qProcessInfo request; capture the raw reply as "process_info_raw"."""
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             # Capture everything between '$' and '#<checksum>'.
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)
    # Keys that a stub may report in a qProcessInfo response; used by
    # parse_process_info_response() to reject unknown keys.
    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]
257 def parse_process_info_response(self, context):
258 # Ensure we have a process info response.
259 self.assertIsNotNone(context)
260 process_info_raw = context.get("process_info_raw")
261 self.assertIsNotNone(process_info_raw)
262
263 # Pull out key:value; pairs.
264 process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }
265
266 # Validate keys are known.
267 for (key, val) in process_info_dict.items():
268 self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
269 self.assertIsNotNone(val)
270
271 return process_info_dict
272
    def add_register_info_collection_packets(self):
        """Append a multi-response qRegisterInfo{n} query loop; raw replies are
        saved under "reg_info_responses".  The loop stops when the stub answers
        with an error (E..) packet."""
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
              "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
              "save_key":"reg_info_responses" } ],
            True)
280 def parse_register_info_packets(self, context):
281 """Return an array of register info dictionaries, one per register info."""
282 reg_info_responses = context.get("reg_info_responses")
283 self.assertIsNotNone(reg_info_responses)
284
285 # Parse register infos.
286 return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]
287
Todd Fiala8aae4f42014-06-13 23:34:17 +0000288 def expect_gdbremote_sequence(self, timeout_seconds =None):
289 if not timeout_seconds:
290 timeout_seconds = self._TIMEOUT_SECONDS
291 return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)
Todd Fialae50b2e42014-06-13 19:11:33 +0000292
    # Keys that may appear in a qRegisterInfo response; used by
    # assert_valid_reg_info() to reject unknown keys.
    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]
308 def assert_valid_reg_info(self, reg_info):
309 # Assert we know about all the reginfo keys parsed.
310 for key in reg_info:
311 self.assertTrue(key in self._KNOWN_REGINFO_KEYS)
312
313 # Check the bare-minimum expected set of register info keys.
314 self.assertTrue("name" in reg_info)
315 self.assertTrue("bitsize" in reg_info)
316 self.assertTrue("offset" in reg_info)
317 self.assertTrue("encoding" in reg_info)
318 self.assertTrue("format" in reg_info)
319
320 def find_pc_reg_info(self, reg_infos):
321 lldb_reg_index = 0
322 for reg_info in reg_infos:
323 if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
324 return (lldb_reg_index, reg_info)
325 lldb_reg_index += 1
326
327 return (None, None)
328
329 def add_lldb_register_index(self, reg_infos):
330 """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry.
331
332 We'll use this when we want to call packets like P/p with a register index but do so
333 on only a subset of the full register info set.
334 """
335 self.assertIsNotNone(reg_infos)
336
337 reg_index = 0
338 for reg_info in reg_infos:
339 reg_info["lldb_register_index"] = reg_index
340 reg_index += 1
341
    def add_query_memory_region_packets(self, address):
        """Append a qMemoryRegionInfo query for address (hex-encoded); the raw
        reply is captured as "memory_region_response"."""
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)
348 def parse_key_val_dict(self, key_val_text):
349 self.assertIsNotNone(key_val_text)
350 kv_dict = {}
351 for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
352 kv_dict[match.group(1)] = match.group(2)
353 return kv_dict
354
355 def parse_memory_region_packet(self, context):
356 # Ensure we have a context.
357 self.assertIsNotNone(context.get("memory_region_response"))
358
359 # Pull out key:value; pairs.
360 mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))
361
362 # Validate keys are known.
363 for (key, val) in mem_region_dict.items():
364 self.assertTrue(key in ["start", "size", "permissions", "error"])
365 self.assertIsNotNone(val)
366
367 # Return the dictionary of key-value pairs for the memory region.
368 return mem_region_dict
369
    def assert_address_within_memory_region(self, test_address, mem_region_dict):
        """Fail unless test_address lies in [start, start + size) of mem_region_dict.

        "start" and "size" are hex-encoded strings, as parsed from a
        qMemoryRegionInfo reply.
        """
        self.assertIsNotNone(mem_region_dict)
        self.assertTrue("start" in mem_region_dict)
        self.assertTrue("size" in mem_region_dict)

        range_start = int(mem_region_dict["start"], 16)
        range_size = int(mem_region_dict["size"], 16)
        range_end = range_start + range_size

        if test_address < range_start:
            self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
        elif test_address >= range_end:
            self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
    def add_threadinfo_collection_packets(self):
        """Append a qfThreadInfo/qsThreadInfo collection loop; raw replies are
        saved under "threadinfo_responses".  The loop ends on the "$l#.."
        end-of-list reply."""
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)
391 def parse_threadinfo_packets(self, context):
392 """Return an array of thread ids (decimal ints), one per thread."""
393 threadinfo_responses = context.get("threadinfo_responses")
394 self.assertIsNotNone(threadinfo_responses)
395
396 thread_ids = []
397 for threadinfo_response in threadinfo_responses:
398 new_thread_infos = parse_threadinfo_response(threadinfo_response)
399 thread_ids.extend(new_thread_infos)
400 return thread_ids
401
402 def wait_for_thread_count(self, thread_count, timeout_seconds=3):
403 start_time = time.time()
404 timeout_time = start_time + timeout_seconds
405
406 actual_thread_count = 0
407 while actual_thread_count < thread_count:
408 self.reset_test_sequence()
409 self.add_threadinfo_collection_packets()
410
411 context = self.expect_gdbremote_sequence()
412 self.assertIsNotNone(context)
413
414 threads = self.parse_threadinfo_packets(context)
415 self.assertIsNotNone(threads)
416
417 actual_thread_count = len(threads)
418
419 if time.time() > timeout_time:
420 raise Exception(
421 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format(
422 timeout_seconds, thread_count, actual_thread_count))
423
424 return threads
425
    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        """Append a Z0 (set software breakpoint) exchange at address.

        When do_continue is True, also append a continue packet and expect a
        $T stop reply, capturing "stop_signo" and "stop_thread_id".
        """
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)
    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        """Append a z0 (remove software breakpoint) exchange at address."""
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
             ], True)
    def add_qSupported_packets(self):
        """Append a qSupported request; capture the raw reply as "qSupported_response"."""
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
             ], True)
    # Stub features we expect to see in a qSupported reply; used by
    # parse_qSupported_response() to reject unknown features.
    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]
    def parse_qSupported_response(self, context):
        """Parse the captured qSupported reply into a feature dict.

        Returns a dict mapping feature name to its value.  Raises Exception
        for malformed entries or features not in
        _KNOWN_QSUPPORTED_STUB_FEATURES.
        """
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected.  For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict
496 def run_process_then_stop(self, run_seconds=1):
497 # Tell the stub to continue.
498 self.test_sequence.add_log_lines(
499 ["read packet: $vCont;c#00"],
500 True)
501 context = self.expect_gdbremote_sequence()
502
503 # Wait for run_seconds.
504 time.sleep(run_seconds)
505
506 # Send an interrupt, capture a T response.
507 self.reset_test_sequence()
508 self.test_sequence.add_log_lines(
509 ["read packet: {}".format(chr(03)),
510 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
511 True)
512 context = self.expect_gdbremote_sequence()
513 self.assertIsNotNone(context)
514 self.assertIsNotNone(context.get("stop_result"))
515
516 return context
517
518 def select_modifiable_register(self, reg_infos):
519 """Find a register that can be read/written freely."""
520 PREFERRED_REGISTER_NAMES = sets.Set(["rax",])
521
522 # First check for the first register from the preferred register name set.
523 alternative_register_index = None
524
525 self.assertIsNotNone(reg_infos)
526 for reg_info in reg_infos:
527 if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
528 # We found a preferred register. Use it.
529 return reg_info["lldb_register_index"]
530 if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
531 # A frame pointer register will do as a register to modify temporarily.
532 alternative_register_index = reg_info["lldb_register_index"]
533
534 # We didn't find a preferred register. Return whatever alternative register
535 # we found, if any.
536 return alternative_register_index
537
538 def extract_registers_from_stop_notification(self, stop_key_vals_text):
539 self.assertIsNotNone(stop_key_vals_text)
540 kv_dict = self.parse_key_val_dict(stop_key_vals_text)
541
542 registers = {}
543 for (key, val) in kv_dict.items():
544 if re.match(r"^[0-9a-fA-F]+", key):
545 registers[int(key, 16)] = val
546 return registers
547
    def gather_register_infos(self):
        """Run a fresh qRegisterInfo collection sequence against the stub.

        Returns the parsed register info dicts, each annotated with its
        "lldb_register_index".
        """
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos
561 def find_generic_register_with_name(self, reg_infos, generic_name):
562 self.assertIsNotNone(reg_infos)
563 for reg_info in reg_infos:
564 if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
565 return reg_info
566 return None
567
568