blob: 96e941b244e31fb4ee0f62f8209c0ee0aaf1ec80 [file] [log] [blame]
Todd Fialae50b2e42014-06-13 19:11:33 +00001"""
2Base class for gdb-remote test cases.
3"""
4
Todd Fiala8aae4f42014-06-13 23:34:17 +00005import errno
Todd Fialae50b2e42014-06-13 19:11:33 +00006import unittest2
7import pexpect
8import platform
Todd Fiala9e2d3292014-07-09 23:10:43 +00009import random
Todd Fialae2202002014-06-27 22:11:56 +000010import re
Todd Fialae50b2e42014-06-13 19:11:33 +000011import sets
12import signal
13import socket
14import subprocess
15import sys
16import time
17from lldbtest import *
18from lldbgdbserverutils import *
19import logging
20import os.path
21
class GdbRemoteTestCaseBase(TestBase):
    # Shared base class for tests that drive a gdb-remote protocol stub
    # (lldb-gdbserver or debugserver) over a socket via expected
    # packet-sequence replay.

    mydir = TestBase.compute_mydir(__file__)

    # Default timeout, in seconds, for replaying one expected packet sequence.
    _TIMEOUT_SECONDS = 5

    # Pre-computed kill packet ("k" payload with its correct checksum 0x6b),
    # sent at teardown so the stub exits gracefully.
    _GDBREMOTE_KILL_PACKET = "$k#6b"

    _LOGGING_LEVEL = logging.WARNING
    # _LOGGING_LEVEL = logging.DEBUG

    # Inferior startup modes selected via set_inferior_startup_{launch,attach}().
    _STARTUP_ATTACH = "attach"
    _STARTUP_LAUNCH = "launch"

    # GDB Signal numbers that are not target-specific used for common exceptions
    TARGET_EXC_BAD_ACCESS      = 0x91
    TARGET_EXC_BAD_INSTRUCTION = 0x92
    TARGET_EXC_ARITHMETIC      = 0x93
    TARGET_EXC_EMULATION       = 0x94
    TARGET_EXC_SOFTWARE        = 0x95
    TARGET_EXC_BREAKPOINT      = 0x96
43
44 def setUp(self):
45 TestBase.setUp(self)
46 FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s'
47 logging.basicConfig(format=FORMAT)
48 self.logger = logging.getLogger(__name__)
49 self.logger.setLevel(self._LOGGING_LEVEL)
50 self.test_sequence = GdbRemoteTestSequence(self.logger)
51 self.set_inferior_startup_launch()
Todd Fiala9e2d3292014-07-09 23:10:43 +000052 self.port = self.get_next_port()
Todd Fialae50b2e42014-06-13 19:11:33 +000053
Todd Fiala9e2d3292014-07-09 23:10:43 +000054 def get_next_port(self):
55 return 12000 + random.randint(0,3999)
Todd Fialae50b2e42014-06-13 19:11:33 +000056
    def reset_test_sequence(self):
        # Discard any queued expected packets and start a new, empty
        # gdb-remote packet sequence.
        self.test_sequence = GdbRemoteTestSequence(self.logger)
59
60 def init_llgs_test(self):
61 self.debug_monitor_exe = get_lldb_gdbserver_exe()
62 if not self.debug_monitor_exe:
63 self.skipTest("lldb_gdbserver exe not found")
Todd Fiala8aae4f42014-06-13 23:34:17 +000064 self.debug_monitor_extra_args = " -c 'log enable -T -f process-{}.log lldb break process thread' -c 'log enable -T -f packets-{}.log gdb-remote packets'".format(self.id(), self.id(), self.id())
Todd Fialae50b2e42014-06-13 19:11:33 +000065
    def init_debugserver_test(self):
        # Locate the debugserver executable; skip the test when absent.
        self.debug_monitor_exe = get_debugserver_exe()
        if not self.debug_monitor_exe:
            self.skipTest("debugserver exe not found")
        # Per-test packet log; 0x800000 selects debugserver's packet logging
        # (flag value taken from existing usage — confirm against debugserver docs).
        self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName)
71
    def create_socket(self):
        """Create a TCP socket connected to the debug monitor on self.port.

        Registers a teardown hook that (best effort) sends the gdb-remote
        kill packet so the stub shuts down, then closes the socket. The hook
        is registered before connect() so cleanup runs even if the connect
        attempt itself fails part-way.
        """
        sock = socket.socket()
        logger = self.logger

        def shutdown_socket():
            if sock:
                try:
                    # send the kill packet so lldb-gdbserver shuts down gracefully
                    sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
                except:
                    logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

                try:
                    sock.close()
                except:
                    logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0]))

        self.addTearDownHook(shutdown_socket)

        sock.connect(('localhost', self.port))
        return sock
93
    def set_inferior_startup_launch(self):
        # Subsequent prep_debug_monitor_and_inferior() calls will launch the
        # inferior through the stub rather than attach to a running process.
        self._inferior_startup = self._STARTUP_LAUNCH
96
    def set_inferior_startup_attach(self):
        # Subsequent prep_debug_monitor_and_inferior() calls will start the
        # inferior first and attach the stub to it (--attach).
        self._inferior_startup = self._STARTUP_ATTACH
99
    def launch_debug_monitor(self, attach_pid=None):
        """Spawn the debug monitor via pexpect, listening on self.port.

        When attach_pid is given, the stub is told to attach to that pid
        with --attach. Returns the pexpect spawn object; the caller owns
        closing it.
        """
        # Create the command line.
        commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port)
        if attach_pid:
            commandline += " --attach=%d" % attach_pid

        # Start the server.
        server = pexpect.spawn(commandline)

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            server.logfile_read = sys.stdout

        return server
114
    def connect_to_debug_monitor(self, attach_pid=None):
        """Launch the debug monitor and connect a socket to it, with retries.

        Tries up to MAX_ATTEMPTS times. On each failure — the stub never
        reported it is listening, or the TCP connect was refused — the stub
        is closed, we sleep a random 1-5 seconds (to reduce port collisions
        with concurrently-running tests), pick a fresh random port, and
        retry. Raises when every attempt fails. On success, stores the
        connected socket in self.sock and returns the pexpect server.
        """
        attempts = 0
        MAX_ATTEMPTS = 20

        while attempts < MAX_ATTEMPTS:
            server = self.launch_debug_monitor(attach_pid=attach_pid)

            # Wait until we receive the server ready message before continuing.
            port_good = True
            try:
                server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port))
            except:
                # Timed out / stub died: the port is likely in use by another test.
                port_good = False
                server.close()

            if port_good:
                # Schedule debug monitor to be shut down during teardown.
                logger = self.logger
                def shutdown_debug_monitor():
                    try:
                        server.close()
                    except:
                        logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0]))
                self.addTearDownHook(shutdown_debug_monitor)

                # Create a socket to talk to the server
                try:
                    self.sock = self.create_socket()
                    return server
                except socket.error as serr:
                    # We're only trying to handle connection refused.
                    if serr.errno != errno.ECONNREFUSED:
                        raise serr
                    # We should close the server here to be safe.
                    server.close()

            # Increment attempts.
            print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS))
            attempts += 1

            # And wait a random length of time before next attempt, to avoid collisions.
            time.sleep(random.randint(1,5))

            # Now grab a new port number.
            self.port = self.get_next_port()

        raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts)
Todd Fialae50b2e42014-06-13 19:11:33 +0000162
163 def launch_process_for_attach(self,inferior_args=None, sleep_seconds=3):
164 # We're going to start a child process that the debug monitor stub can later attach to.
165 # This process needs to be started so that it just hangs around for a while. We'll
166 # have it sleep.
167 exe_path = os.path.abspath("a.out")
168
169 args = [exe_path]
170 if inferior_args:
171 args.extend(inferior_args)
172 if sleep_seconds:
173 args.append("sleep:%d" % sleep_seconds)
174
175 return subprocess.Popen(args)
176
    def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3):
        """Prep the debug monitor, the inferior, and the expected packet stream.

        Handle the separate cases of using the debug monitor in attach-to-inferior mode
        and in launch-inferior mode.

        For attach-to-inferior mode, the inferior process is first started, then
        the debug monitor is started in attach to pid mode (using --attach on the
        stub command line), and the no-ack-mode setup is appended to the packet
        stream. The packet stream is not yet executed, ready to have more expected
        packet entries added to it.

        For launch-inferior mode, the stub is first started, then no ack mode is
        setup on the expected packet stream, then the verified launch packets are added
        to the expected socket stream. The packet stream is not yet executed, ready
        to have more expected packet entries added to it.

        The return value is:
        {inferior:<inferior>, server:<server>}
        """
        inferior = None
        attach_pid = None

        if self._inferior_startup == self._STARTUP_ATTACH:
            # Launch the process that we'll use as the inferior.
            inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds)
            self.assertIsNotNone(inferior)
            self.assertTrue(inferior.pid > 0)
            attach_pid = inferior.pid

        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor(attach_pid=attach_pid)
        self.assertIsNotNone(server)

        if self._inferior_startup == self._STARTUP_LAUNCH:
            # Build launch args
            launch_args = [os.path.abspath('a.out')]
            if inferior_args:
                launch_args.extend(inferior_args)

        # Build the expected protocol stream
        self.add_no_ack_remote_stream()
        if self._inferior_startup == self._STARTUP_LAUNCH:
            self.add_verified_launch_packets(launch_args)

        return {"inferior":inferior, "server":server}
223
    def add_no_ack_remote_stream(self):
        # Queue the initial ack handshake plus QStartNoAckMode, after which
        # neither side sends '+' acks. "b0" and "9a" are the real two-hex-digit
        # checksums for these payloads.
        self.test_sequence.add_log_lines(
            ["read packet: +",
             "read packet: $QStartNoAckMode#b0",
             "send packet: +",
             "send packet: $OK#9a",
             "read packet: +"],
            True)
232
    def add_verified_launch_packets(self, launch_args):
        # Queue the A (launch) packet built from launch_args, expect OK, then
        # confirm with qLaunchSuccess. NOTE(review): "#00" appears to be a
        # placeholder checksum accepted by the replay framework in no-ack
        # mode — confirm against lldbgdbserverutils.
        self.test_sequence.add_log_lines(
            ["read packet: %s" % build_gdbremote_A_packet(launch_args),
             "send packet: $OK#00",
             "read packet: $qLaunchSuccess#a5",
             "send packet: $OK#00"],
            True)
240
    def add_thread_suffix_request_packets(self):
        # Ask the stub to accept ";thread:<id>" suffixes on register packets
        # (p/P/QSaveRegisterState etc.); expect it to agree.
        self.test_sequence.add_log_lines(
            ["read packet: $QThreadSuffixSupported#00",
             "send packet: $OK#00",
             ], True)
246
    def add_process_info_collection_packets(self):
        # Queue qProcessInfo and capture the stub's whole reply payload as
        # "process_info_raw" for parse_process_info_response().
        self.test_sequence.add_log_lines(
            ["read packet: $qProcessInfo#00",
             { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }],
            True)
252
    # Keys a stub may legally report in a qProcessInfo response; anything
    # else fails parse_process_info_response().
    _KNOWN_PROCESS_INFO_KEYS = [
        "pid",
        "parent-pid",
        "real-uid",
        "real-gid",
        "effective-uid",
        "effective-gid",
        "cputype",
        "cpusubtype",
        "ostype",
        "vendor",
        "endian",
        "ptrsize"
        ]
267
268 def parse_process_info_response(self, context):
269 # Ensure we have a process info response.
270 self.assertIsNotNone(context)
271 process_info_raw = context.get("process_info_raw")
272 self.assertIsNotNone(process_info_raw)
273
274 # Pull out key:value; pairs.
275 process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) }
276
277 # Validate keys are known.
278 for (key, val) in process_info_dict.items():
279 self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
280 self.assertIsNotNone(val)
281
282 return process_info_dict
283
    def add_register_info_collection_packets(self):
        # Queue qRegisterInfo0, qRegisterInfo1, ... until the stub replies
        # with an error (end of register list); save all replies under
        # "reg_info_responses".
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True,
              "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
              "save_key":"reg_info_responses" } ],
            True)
290
    def parse_register_info_packets(self, context):
        """Return an array of register info dictionaries, one per register info."""
        reg_info_responses = context.get("reg_info_responses")
        self.assertIsNotNone(reg_info_responses)

        # Parse register infos. parse_reg_info_response() comes from
        # lldbgdbserverutils and turns one qRegisterInfo payload into a dict.
        return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses]
298
Todd Fiala50a211b2014-06-14 22:00:36 +0000299 def expect_gdbremote_sequence(self, timeout_seconds=None):
Todd Fiala8aae4f42014-06-13 23:34:17 +0000300 if not timeout_seconds:
301 timeout_seconds = self._TIMEOUT_SECONDS
302 return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger)
Todd Fialae50b2e42014-06-13 19:11:33 +0000303
    # Keys a stub may legally report in a qRegisterInfo response; anything
    # else fails assert_valid_reg_info().
    _KNOWN_REGINFO_KEYS = [
        "name",
        "alt-name",
        "bitsize",
        "offset",
        "encoding",
        "format",
        "set",
        "gcc",
        "dwarf",
        "generic",
        "container-regs",
        "invalidate-regs"
    ]
318
319 def assert_valid_reg_info(self, reg_info):
320 # Assert we know about all the reginfo keys parsed.
321 for key in reg_info:
322 self.assertTrue(key in self._KNOWN_REGINFO_KEYS)
323
324 # Check the bare-minimum expected set of register info keys.
325 self.assertTrue("name" in reg_info)
326 self.assertTrue("bitsize" in reg_info)
327 self.assertTrue("offset" in reg_info)
328 self.assertTrue("encoding" in reg_info)
329 self.assertTrue("format" in reg_info)
330
331 def find_pc_reg_info(self, reg_infos):
332 lldb_reg_index = 0
333 for reg_info in reg_infos:
334 if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
335 return (lldb_reg_index, reg_info)
336 lldb_reg_index += 1
337
338 return (None, None)
339
340 def add_lldb_register_index(self, reg_infos):
341 """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry.
342
343 We'll use this when we want to call packets like P/p with a register index but do so
344 on only a subset of the full register info set.
345 """
346 self.assertIsNotNone(reg_infos)
347
348 reg_index = 0
349 for reg_info in reg_infos:
350 reg_info["lldb_register_index"] = reg_index
351 reg_index += 1
352
    def add_query_memory_region_packets(self, address):
        # Queue qMemoryRegionInfo for the given address (hex-formatted) and
        # capture the reply payload as "memory_region_response".
        self.test_sequence.add_log_lines(
            ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
             {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }],
            True)
358
    def parse_key_val_dict(self, key_val_text, allow_dupes=True):
        """Parse "key:value;"-style text into a dict.

        With allow_dupes (the default), a repeated key promotes the stored
        value to a list and appends to it; otherwise a duplicate key fails
        the test.
        """
        self.assertIsNotNone(key_val_text)
        kv_dict = {}
        for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
            key = match.group(1)
            val = match.group(2)
            if key in kv_dict:
                if allow_dupes:
                    if type(kv_dict[key]) == list:
                        kv_dict[key].append(val)
                    else:
                        # Promote to list
                        kv_dict[key] = [kv_dict[key], val]
                else:
                    self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict))
            else:
                kv_dict[key] = val
        return kv_dict
377
378 def parse_memory_region_packet(self, context):
379 # Ensure we have a context.
380 self.assertIsNotNone(context.get("memory_region_response"))
381
382 # Pull out key:value; pairs.
383 mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response"))
384
385 # Validate keys are known.
386 for (key, val) in mem_region_dict.items():
387 self.assertTrue(key in ["start", "size", "permissions", "error"])
388 self.assertIsNotNone(val)
389
390 # Return the dictionary of key-value pairs for the memory region.
391 return mem_region_dict
392
393 def assert_address_within_memory_region(self, test_address, mem_region_dict):
394 self.assertIsNotNone(mem_region_dict)
395 self.assertTrue("start" in mem_region_dict)
396 self.assertTrue("size" in mem_region_dict)
397
398 range_start = int(mem_region_dict["start"], 16)
399 range_size = int(mem_region_dict["size"], 16)
400 range_end = range_start + range_size
401
402 if test_address < range_start:
403 self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
404 elif test_address >= range_end:
405 self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size))
406
    def add_threadinfo_collection_packets(self):
        # Queue qfThreadInfo then repeated qsThreadInfo until the stub sends
        # the "l" (end-of-list) reply; save all replies under
        # "threadinfo_responses".
        self.test_sequence.add_log_lines(
            [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo",
                "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
                "save_key":"threadinfo_responses" } ],
            True)
413
414 def parse_threadinfo_packets(self, context):
415 """Return an array of thread ids (decimal ints), one per thread."""
416 threadinfo_responses = context.get("threadinfo_responses")
417 self.assertIsNotNone(threadinfo_responses)
418
419 thread_ids = []
420 for threadinfo_response in threadinfo_responses:
421 new_thread_infos = parse_threadinfo_response(threadinfo_response)
422 thread_ids.extend(new_thread_infos)
423 return thread_ids
424
425 def wait_for_thread_count(self, thread_count, timeout_seconds=3):
426 start_time = time.time()
427 timeout_time = start_time + timeout_seconds
428
429 actual_thread_count = 0
430 while actual_thread_count < thread_count:
431 self.reset_test_sequence()
432 self.add_threadinfo_collection_packets()
433
434 context = self.expect_gdbremote_sequence()
435 self.assertIsNotNone(context)
436
437 threads = self.parse_threadinfo_packets(context)
438 self.assertIsNotNone(threads)
439
440 actual_thread_count = len(threads)
441
442 if time.time() > timeout_time:
443 raise Exception(
444 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format(
445 timeout_seconds, thread_count, actual_thread_count))
446
447 return threads
448
    def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1):
        """Queue a Z0 software-breakpoint set at address, expecting OK.

        With do_continue, also queue a continue and the expected T stop
        reply, capturing "stop_signo" and "stop_thread_id".
        """
        self.test_sequence.add_log_lines(
            [# Set the breakpoint.
             "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could set it.
             "send packet: $OK#00",
             ], True)

        if (do_continue):
            self.test_sequence.add_log_lines(
                [# Continue the inferior.
                 "read packet: $c#00",
                 # Expect a breakpoint stop report.
                 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
                 ], True)
464
    def add_remove_breakpoint_packets(self, address, breakpoint_kind=1):
        # Queue a z0 software-breakpoint removal at address, expecting OK.
        self.test_sequence.add_log_lines(
            [# Remove the breakpoint.
             "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind),
             # Verify the stub could unset it.
             "send packet: $OK#00",
            ], True)
472
    def add_qSupported_packets(self):
        # Queue qSupported and capture the whole feature-list reply as
        # "qSupported_response" for parse_qSupported_response().
        self.test_sequence.add_log_lines(
            ["read packet: $qSupported#00",
             {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}},
            ], True)
478
    # Stub features a qSupported reply may legally advertise; anything else
    # makes parse_qSupported_response() raise.
    _KNOWN_QSUPPORTED_STUB_FEATURES = [
        "augmented-libraries-svr4-read",
        "PacketSize",
        "QStartNoAckMode",
        "QThreadSuffixSupported",
        "QListThreadsInStopReply",
        "qXfer:auxv:read",
        "qXfer:libraries:read",
        "qXfer:libraries-svr4:read",
    ]
489
    def parse_qSupported_response(self, context):
        """Parse a captured qSupported reply into a feature dict.

        For values with key=val, the dict key and vals are set as expected.
        For feature+, feature- and feature?, the +,-,? is stripped from the
        key and stored as the value. Raises on malformed or unrecognized
        features.
        """
        self.assertIsNotNone(context)

        raw_response = context.get("qSupported_response")
        self.assertIsNotNone(raw_response)

        # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the
        # +,-,? is stripped from the key and set as the value.
        supported_dict = {}
        for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
            key = match.group(1)
            val = match.group(3)

            # key=val: store as is
            if val and len(val) > 0:
                supported_dict[key] = val
            else:
                if len(key) < 2:
                    raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}")
                supported_type = key[-1]
                key = key[:-1]
                if not supported_type in ["+", "-", "?"]:
                    raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
                supported_dict[key] = supported_type
            # Ensure we know the supported element
            if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES:
                raise Exception("unknown qSupported stub feature reported: %s" % key)

        return supported_dict
519
520 def run_process_then_stop(self, run_seconds=1):
521 # Tell the stub to continue.
522 self.test_sequence.add_log_lines(
523 ["read packet: $vCont;c#00"],
524 True)
525 context = self.expect_gdbremote_sequence()
526
527 # Wait for run_seconds.
528 time.sleep(run_seconds)
529
530 # Send an interrupt, capture a T response.
531 self.reset_test_sequence()
532 self.test_sequence.add_log_lines(
533 ["read packet: {}".format(chr(03)),
534 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }],
535 True)
536 context = self.expect_gdbremote_sequence()
537 self.assertIsNotNone(context)
538 self.assertIsNotNone(context.get("stop_result"))
539
540 return context
541
542 def select_modifiable_register(self, reg_infos):
543 """Find a register that can be read/written freely."""
544 PREFERRED_REGISTER_NAMES = sets.Set(["rax",])
545
546 # First check for the first register from the preferred register name set.
547 alternative_register_index = None
548
549 self.assertIsNotNone(reg_infos)
550 for reg_info in reg_infos:
551 if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES):
552 # We found a preferred register. Use it.
553 return reg_info["lldb_register_index"]
554 if ("generic" in reg_info) and (reg_info["generic"] == "fp"):
555 # A frame pointer register will do as a register to modify temporarily.
556 alternative_register_index = reg_info["lldb_register_index"]
557
558 # We didn't find a preferred register. Return whatever alternative register
559 # we found, if any.
560 return alternative_register_index
561
562 def extract_registers_from_stop_notification(self, stop_key_vals_text):
563 self.assertIsNotNone(stop_key_vals_text)
564 kv_dict = self.parse_key_val_dict(stop_key_vals_text)
565
566 registers = {}
567 for (key, val) in kv_dict.items():
568 if re.match(r"^[0-9a-fA-F]+", key):
569 registers[int(key, 16)] = val
570 return registers
571
    def gather_register_infos(self):
        """Collect, parse and index the stub's full register info list.

        Runs the qRegisterInfo collection sequence immediately and returns
        the parsed reg_infos with "lldb_register_index" added to each entry.
        """
        self.reset_test_sequence()
        self.add_register_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        return reg_infos
584
585 def find_generic_register_with_name(self, reg_infos, generic_name):
586 self.assertIsNotNone(reg_infos)
587 for reg_info in reg_infos:
588 if ("generic" in reg_info) and (reg_info["generic"] == generic_name):
589 return reg_info
590 return None
591
    def decode_gdbremote_binary(self, encoded_bytes):
        """Decode a gdb-remote binary payload (escaping plus run-length encoding).

        Per the gdb remote serial protocol: "}" escapes the next char
        (xor 0x20); "*" repeats the previously decoded char, with the repeat
        count encoded as ord(next char) - 29.
        """
        decoded_bytes = ""
        i = 0
        while i < len(encoded_bytes):
            if encoded_bytes[i] == "}":
                # Handle escaped char.
                self.assertTrue(i + 1 < len(encoded_bytes))
                decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20)
                i +=2
            elif encoded_bytes[i] == "*":
                # Handle run length encoding.
                self.assertTrue(len(decoded_bytes) > 0)
                self.assertTrue(i + 1 < len(encoded_bytes))
                repeat_count = ord(encoded_bytes[i+1]) - 29
                decoded_bytes += decoded_bytes[-1] * repeat_count
                i += 2
            else:
                decoded_bytes += encoded_bytes[i]
                i += 1
        return decoded_bytes
612
    def build_auxv_dict(self, endian, word_size, auxv_data):
        """Convert raw auxv bytes into a {key: value} dict of target words.

        auxv_data is consumed as alternating key/value words of word_size
        bytes in the given target endianness; a (0, 0) entry terminates the
        vector. Fails the test when the terminator is missing or the same
        key appears twice.
        """
        self.assertIsNotNone(endian)
        self.assertIsNotNone(word_size)
        self.assertIsNotNone(auxv_data)

        auxv_dict = {}

        while len(auxv_data) > 0:
            # Chop off key.
            raw_key = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Chop of value.
            raw_value = auxv_data[:word_size]
            auxv_data = auxv_data[word_size:]

            # Convert raw text from target endian.
            key = unpack_endian_binary_string(endian, raw_key)
            value = unpack_endian_binary_string(endian, raw_value)

            # Handle ending entry.
            if key == 0:
                self.assertEquals(value, 0)
                return auxv_dict

            # The key should not already be present.
            self.assertFalse(key in auxv_dict)
            auxv_dict[key] = value

        self.fail("should not reach here - implies required double zero entry not found")
        return auxv_dict
Todd Fiala51886732014-06-17 22:01:27 +0000644
    def read_binary_data_in_chunks(self, command_prefix, chunk_length):
        """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned."""
        offset = 0
        done = False
        decoded_data = ""

        while not done:
            # Grab the next iteration of data.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length),
                # 'm' means more data follows; 'l' means this is the last chunk.
                # DOTALL/MULTILINE because the binary payload may contain newlines.
                {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} }
                ], True)

            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            response_type = context.get("response_type")
            self.assertIsNotNone(response_type)
            self.assertTrue(response_type in ["l", "m"])

            # Move offset along.
            offset += chunk_length

            # Figure out if we're done. We're done if the response type is l.
            done = response_type == "l"

            # Decode binary data.
            content_raw = context.get("content_raw")
            if content_raw and len(content_raw) > 0:
                self.assertIsNotNone(content_raw)
                decoded_data += self.decode_gdbremote_binary(content_raw)
        return decoded_data
Todd Fiala4c24eba2014-06-19 17:35:40 +0000678
679 def add_interrupt_packets(self):
680 self.test_sequence.add_log_lines([
681 # Send the intterupt.
682 "read packet: {}".format(chr(03)),
683 # And wait for the stop notification.
684 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } },
685 ], True)
686
687 def parse_interrupt_packets(self, context):
688 self.assertIsNotNone(context.get("stop_signo"))
689 self.assertIsNotNone(context.get("stop_key_val_text"))
Todd Fiala9846d452014-06-20 17:39:24 +0000690 return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"]))
691
    def add_QSaveRegisterState_packets(self, thread_id):
        """Queue QSaveRegisterState (with optional thread suffix) and capture
        the reply — a save id on success, or an E response — as "save_response"."""
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id)
        else:
            request = "read packet: $QSaveRegisterState#00"

        self.test_sequence.add_log_lines([
            request,
            {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } },
            ], True)
703
704 def parse_QSaveRegisterState_response(self, context):
705 self.assertIsNotNone(context)
706
707 save_response = context.get("save_response")
708 self.assertIsNotNone(save_response)
709
710 if len(save_response) < 1 or save_response[0] == "E":
711 # error received
712 return (False, None)
713 else:
714 return (True, int(save_response))
715
    def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
        """Queue QRestoreRegisterState for the given save id (with optional
        thread suffix), expecting the stub to reply OK."""
        if thread_id:
            # Use the thread suffix form.
            request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id)
        else:
            request = "read packet: $QRestoreRegisterState:{}#00".format(save_id)

        self.test_sequence.add_log_lines([
            request,
            "send packet: $OK#00"
            ], True)
727
    def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None):
        """Read, bit-flip (xor with all ones), write back and verify each register.

        Returns (successful_writes, failed_writes). Some registers (flags,
        segment selectors, masked control registers) legitimately reject or
        permute writes, so callers should tolerate a few failures.
        """
        self.assertIsNotNone(reg_infos)

        successful_writes = 0
        failed_writes = 0

        for reg_info in reg_infos:
            # Use the lldb register index added to the reg info. We're not necessarily
            # working off a full set of register infos, so an inferred register index could be wrong.
            reg_index = reg_info["lldb_register_index"]
            self.assertIsNotNone(reg_index)

            reg_byte_size = int(reg_info["bitsize"])/8
            self.assertTrue(reg_byte_size > 0)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read the existing value.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Verify the response length.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            initial_reg_value = unpack_register_hex_unsigned(endian, p_response)

            # Flip the value by xoring with all 1s
            all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
            flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
            # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)

            # Handle thread suffix for P.
            if thread_id:
                P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
            else:
                P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size))

            # Write the flipped value to the register.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                P_request,
                { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail
            # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them
            # all flipping perfectly.
            P_response = context.get("P_response")
            self.assertIsNotNone(P_response)
            if P_response == "OK":
                successful_writes += 1
            else:
                failed_writes += 1
                # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)

            # Read back the register value, ensure it matches the flipped value.
            if P_response == "OK":
                self.reset_test_sequence()
                self.test_sequence.add_log_lines([
                    p_request,
                    { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                    ], True)
                context = self.expect_gdbremote_sequence()
                self.assertIsNotNone(context)

                verify_p_response_raw = context.get("p_response")
                self.assertIsNotNone(verify_p_response_raw)
                verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw)

                if verify_bits != flipped_bits_int:
                    # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts.
                    # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)
                    successful_writes -= 1
                    failed_writes +=1

        return (successful_writes, failed_writes)
815
816 def is_bit_flippable_register(self, reg_info):
817 if not reg_info:
818 return False
819 if not "set" in reg_info:
820 return False
821 if reg_info["set"] != "General Purpose Registers":
822 return False
823 if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0):
824 # Don't try to bit flip registers contained in another register.
825 return False
826 if re.match("^.s$", reg_info["name"]):
827 # This is a 2-letter register name that ends in "s", like a segment register.
828 # Don't try to bit flip these.
829 return False
830 # Okay, this looks fine-enough.
831 return True
832
    def read_register_values(self, reg_infos, endian, thread_id=None):
        """Read every register in reg_infos with the p packet.

        Returns {lldb_register_index: unsigned int value}, decoding each hex
        reply from the given target endianness. Fails the test on any E
        (error) reply.
        """
        self.assertIsNotNone(reg_infos)
        values = {}

        for reg_info in reg_infos:
            # We append a register index when load reg infos so we can work with subsets.
            reg_index = reg_info.get("lldb_register_index")
            self.assertIsNotNone(reg_index)

            # Handle thread suffix.
            if thread_id:
                p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id)
            else:
                p_request = "read packet: $p{:x}#00".format(reg_index)

            # Read it with p.
            self.reset_test_sequence()
            self.test_sequence.add_log_lines([
                p_request,
                { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} },
                ], True)
            context = self.expect_gdbremote_sequence()
            self.assertIsNotNone(context)

            # Convert value from target endian to integral.
            p_response = context.get("p_response")
            self.assertIsNotNone(p_response)
            self.assertTrue(len(p_response) > 0)
            self.assertFalse(p_response[0] == "E")

            values[reg_index] = unpack_register_hex_unsigned(endian, p_response)

        return values
866
867 def add_vCont_query_packets(self):
868 self.test_sequence.add_log_lines([
869 "read packet: $vCont?#00",
870 {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } },
871 ], True)
872
873 def parse_vCont_query_response(self, context):
874 self.assertIsNotNone(context)
875 vCont_query_response = context.get("vCont_query_response")
876
877 # Handle case of no vCont support at all - in which case the capture group will be none or zero length.
878 if not vCont_query_response or len(vCont_query_response) == 0:
879 return {}
880
881 return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0}
882
883 def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"):
884 """Used by single step test that appears in a few different contexts."""
885 single_step_count = 0
886
887 while single_step_count < max_step_count:
888 self.assertIsNotNone(thread_id)
889
890 # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id.
891 step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
892 # print "\nstep_packet created: {}\n".format(step_packet)
893
894 # Single step.
895 self.reset_test_sequence()
896 if use_Hc_packet:
897 self.test_sequence.add_log_lines(
898 [# Set the continue thread.
899 "read packet: $Hc{0:x}#00".format(thread_id),
900 "send packet: $OK#00",
901 ], True)
902 self.test_sequence.add_log_lines([
903 # Single step.
904 step_packet,
905 # "read packet: $vCont;s:{0:x}#00".format(thread_id),
906 # Expect a breakpoint stop report.
907 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} },
908 ], True)
909 context = self.expect_gdbremote_sequence()
910 self.assertIsNotNone(context)
911 self.assertIsNotNone(context.get("stop_signo"))
912 self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP)
913
914 single_step_count += 1
915
916 # See if the predicate is true. If so, we're done.
917 if predicate(args):
918 return (True, single_step_count)
919
920 # The predicate didn't return true within the runaway step count.
921 return (False, single_step_count)
922
923 def g_c1_c2_contents_are(self, args):
924 """Used by single step test that appears in a few different contexts."""
925 g_c1_address = args["g_c1_address"]
926 g_c2_address = args["g_c2_address"]
927 expected_g_c1 = args["expected_g_c1"]
928 expected_g_c2 = args["expected_g_c2"]
929
930 # Read g_c1 and g_c2 contents.
931 self.reset_test_sequence()
932 self.test_sequence.add_log_lines(
933 ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
934 {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} },
935 "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
936 {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }],
937 True)
938
939 # Run the packet stream.
940 context = self.expect_gdbremote_sequence()
941 self.assertIsNotNone(context)
942
943 # Check if what we read from inferior memory is what we are expecting.
944 self.assertIsNotNone(context.get("g_c1_contents"))
945 self.assertIsNotNone(context.get("g_c2_contents"))
946
947 return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2)
948
949 def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"):
950 """Used by single step test that appears in a few different contexts."""
951 # Start up the inferior.
952 procs = self.prep_debug_monitor_and_inferior(
953 inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"])
954
955 # Run the process
956 self.test_sequence.add_log_lines(
957 [# Start running after initial stop.
958 "read packet: $c#00",
959 # Match output line that prints the memory address of the function call entry point.
960 # Note we require launch-only testing so we can get inferior otuput.
961 { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
962 "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} },
963 # Now stop the inferior.
964 "read packet: {}".format(chr(03)),
965 # And wait for the stop notification.
966 {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }],
967 True)
968
969 # Run the packet stream.
970 context = self.expect_gdbremote_sequence()
971 self.assertIsNotNone(context)
972
973 # Grab the main thread id.
974 self.assertIsNotNone(context.get("stop_thread_id"))
975 main_thread_id = int(context.get("stop_thread_id"), 16)
976
977 # Grab the function address.
978 self.assertIsNotNone(context.get("function_address"))
979 function_address = int(context.get("function_address"), 16)
980
981 # Grab the data addresses.
982 self.assertIsNotNone(context.get("g_c1_address"))
983 g_c1_address = int(context.get("g_c1_address"), 16)
984
985 self.assertIsNotNone(context.get("g_c2_address"))
986 g_c2_address = int(context.get("g_c2_address"), 16)
987
988 # Set a breakpoint at the given address.
989 # Note this might need to be switched per platform (ARM, mips, etc.).
990 BREAKPOINT_KIND = 1
991 self.reset_test_sequence()
992 self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND)
993 context = self.expect_gdbremote_sequence()
994 self.assertIsNotNone(context)
995
996 # Remove the breakpoint.
997 self.reset_test_sequence()
998 self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND)
999 context = self.expect_gdbremote_sequence()
1000 self.assertIsNotNone(context)
1001
1002 # Verify g_c1 and g_c2 match expected initial state.
1003 args = {}
1004 args["g_c1_address"] = g_c1_address
1005 args["g_c2_address"] = g_c2_address
1006 args["expected_g_c1"] = "0"
1007 args["expected_g_c2"] = "1"
1008
1009 self.assertTrue(self.g_c1_c2_contents_are(args))
1010
1011 # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code.
1012 args["expected_g_c1"] = "1"
1013 args["expected_g_c2"] = "1"
1014 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1015 self.assertTrue(state_reached)
1016
1017 # Verify we hit the next state.
1018 args["expected_g_c1"] = "1"
1019 args["expected_g_c2"] = "0"
1020 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1021 self.assertTrue(state_reached)
1022 self.assertEquals(step_count, 1)
1023
1024 # Verify we hit the next state.
1025 args["expected_g_c1"] = "0"
1026 args["expected_g_c2"] = "0"
1027 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1028 self.assertTrue(state_reached)
1029 self.assertEquals(step_count, 1)
1030
1031 # Verify we hit the next state.
1032 args["expected_g_c1"] = "0"
1033 args["expected_g_c2"] = "1"
1034 (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction)
1035 self.assertTrue(state_reached)
1036 self.assertEquals(step_count, 1)
Todd Fialaaf245d12014-06-30 21:05:18 +00001037