//===-- StackFrameList.cpp --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

//----------------------------------------------------------------------
// StackFrameList constructor
//----------------------------------------------------------------------
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call clear since this takes a lock and clears the stack frame list in case
  // another thread is currently using this stack frame list
  Clear();
}

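// If the cached inlined depth has been invalidated (for example because the
// thread's PC changed), recompute it from the thread's current state.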
void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

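// Return the current inlined depth, or UINT32_MAX if it is not valid. The
// cached depth is invalidated here if the thread's PC no longer matches the
// PC it was computed for.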
uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
      if (log && log->GetVerbose())
        log->Printf(
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf(
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise,
  // we stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this break point is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, I set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I go to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    LLVM_FALLTHROUGH;
  default: {
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into them. So first we check whether we have
    // more than one inlined block sharing this PC:
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
    if (log && log->GetVerbose())
      log->Printf("ResetCurrentInlinedDepth: setting inlined "
                  "depth: %d 0x%" PRIx64 ".\n",
                  m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

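// Used when "virtually" stepping into an inlined frame: reduce the number of
// hidden inlined frames by one. Returns true if a valid depth was actually
// decremented.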
bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

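// Explicitly set the current inlined depth, remembering the PC it applies to
// so it can be invalidated when the thread moves.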
void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

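// Fill m_frames with concrete (non-inlined) frame placeholders up to and
// including the frame at end_idx. The StackFrame objects themselves are
// created lazily in GetFrameAtIndex.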
void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind *unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  if (!unwinder)
    return;

  uint32_t num_frames = unwinder->GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  Target &target, addr_t return_pc,
                                  std::vector<Function *> &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  auto first_level_edges = begin.GetCallEdges();
  if (log)
    for (const CallEdge &edge : first_level_edges)
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge.GetReturnPCAddress(begin, target));
  auto first_edge_it = std::lower_bound(
      first_level_edges.begin(), first_level_edges.end(), return_pc,
      [&](const CallEdge &edge, addr_t target_pc) {
        return edge.GetReturnPCAddress(begin, target) < target_pc;
      });
  if (first_edge_it == first_level_edges.end() ||
      first_edge_it->GetReturnPCAddress(begin, target) != return_pc) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }
  CallEdge &first_edge = const_cast<CallEdge &>(*first_edge_it);

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge.GetCallee(images);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    std::vector<Function *> active_path = {};
    std::vector<Function *> solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;

    DFS(Function *end, ModuleList &images) : end(end), images(images) {}

    void search(Function *first_callee, std::vector<Function *> &path) {
      dfs(first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(Function *callee) {
      // Found a path to the target function.
      if (callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(callee);
      for (CallEdge &edge : callee->GetTailCallingEdges()) {
        Function *next_callee = edge.GetCallee(images);
        if (!next_callee)
          continue;

        dfs(next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images).search(first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
/// --------------
/// |    ...     | <- Completed frames.
/// --------------
/// | prev_frame |
/// --------------
/// |    ...     | <- Artificial frames inserted here.
/// --------------
/// | next_frame |
/// --------------
/// |    ...     | <- Not-yet-visited frames.
/// --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));

  assert(!m_frames.empty() && "Cannot synthesize frames in an empty stack");
  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  std::vector<Function *> path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  FindInterveningFrames(*next_func, *prev_func, target, return_pc, path, images,
                        log);

  // Push synthetic tail call frames.
  for (Function *callee : llvm::reverse(path)) {
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc =
        callee->GetAddressRange().GetBaseAddress().GetLoadAddress(&target);
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0}", callee->GetDisplayName());
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

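// Ensure that m_frames contains entries up to and including end_idx (or up to
// the end of the stack if there are fewer frames). When inlined frames are
// shown, this unwinds concrete frames and expands each one into its inlined
// call chain, inserting artificial tail-call frames along the way.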
void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // We've already gotten more frames than asked for, or we've already finished
  // unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind *unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success =
              unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp.reset(new StackFrame(m_thread.shared_from_this(),
                                               m_frames.size(), idx, reg_ctx_sp,
                                               cfa, pc, nullptr));
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder && unwinder->GetFrameInfoAtIndex(idx, cfa, pc);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp.reset(
          new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx, cfa,
                         cfa_is_valid, pc, StackFrame::Kind::Regular, nullptr));

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    if (unwind_block) {
      Address curr_frame_address(unwind_frame_sp->GetFrameCodeAddress());
      TargetSP target_sp = m_thread.CalculateTarget();
      // Be sure to adjust the frame address to match the address that was
      // used to lookup the symbol context above. If we are in the first
      // concrete frame, then we lookup using the current address, else we
      // decrement the address by one to get the correct location.
      if (idx > 0) {
        if (curr_frame_address.GetOffset() == 0) {
          // If curr_frame_address points to the first address in a section
          // then after adjustment it will point to another section. In that
          // case resolve the address again to the correct section plus
          // offset form.
          addr_t load_addr = curr_frame_address.GetOpcodeLoadAddress(
              target_sp.get(), AddressClass::eCode);
          curr_frame_address.SetOpcodeLoadAddress(
              load_addr - 1, target_sp.get(), AddressClass::eCode);
        } else {
          curr_frame_address.Slide(-1);
        }
      }

      SymbolContext next_frame_sc;
      Address next_frame_address;

      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        StackFrameSP frame_sp(
            new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx,
                           unwind_frame_sp->GetRegisterContextSP(), cfa,
                           next_frame_address, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
         prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n    Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

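// Return the number of frames visible to the user. If can_create is true,
// unwind the whole stack first so the count is exact.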
uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);

  return GetVisibleStackFrameIndex(m_frames.size());
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

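// Return the frame at the given user-visible index, unwinding more of the
// stack if needed. The index is adjusted by the current inlined depth before
// looking into m_frames.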
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't then you asked for too many frames.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      Unwind *unwinder = m_thread.GetUnwinder();
      if (unwinder) {
        addr_t pc, cfa;
        if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
          const bool cfa_is_valid = true;
          frame_sp.reset(new StackFrame(m_thread.shared_from_this(), idx, idx,
                                        cfa, cfa_is_valid, pc,
                                        StackFrame::Kind::Regular, nullptr));

          Function *function =
              frame_sp->GetSymbolContext(eSymbolContextFunction).function;
          if (function) {
            // When we aren't showing inline functions we always use the top
            // most function block as the scope.
            frame_sp->SetSymbolContextScope(&function->GetBlock(false));
          } else {
            // Set the symbol scope from the symbol regardless if it is nullptr
            // or valid.
            frame_sp->SetSymbolContextScope(
                frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
          }
          SetFrameAtIndex(idx, frame_sp);
        }
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames, that should not ever
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // unwind index is always greater than or equal to the frame index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

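// Return the frame whose StackID matches stack_id, unwinding the stack as
// needed. Frames already cached are found with a binary search first.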
StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

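// Store frame_sp at index idx, growing m_frames if necessary. Returns false
// only if the resize failed.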
bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

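// Mark the given frame as selected and return its user-visible index (i.e.
// with the current inlined depth subtracted out).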
uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number of stack frames to zero so we can
// determine how many frames we have lazily.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

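// Merge a freshly computed frame list (curr_ap) into the previously cached
// one (prev_sp). With more than one new frame the new list simply replaces
// the old one; with a single new frame the cached data from the matching
// previous frame is carried over.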
void StackFrameList::Merge(std::unique_ptr<StackFrameList> &curr_ap,
                           lldb::StackFrameListSP &prev_sp) {
  std::unique_lock<std::recursive_mutex> current_lock, previous_lock;
  if (curr_ap)
    current_lock = std::unique_lock<std::recursive_mutex>(curr_ap->m_mutex);
  if (prev_sp)
    previous_lock = std::unique_lock<std::recursive_mutex>(prev_sp->m_mutex);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nStackFrameList::Merge():\nPrev:\n");
  if (prev_sp)
    prev_sp->Dump(&s);
  else
    s.PutCString("NULL");
  s.PutCString("\nCurr:\n");
  if (curr_ap)
    curr_ap->Dump(&s);
  else
    s.PutCString("NULL");
  s.EOL();
#endif

  if (!curr_ap || curr_ap->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No current frames, leave previous frames alone...\n");
#endif
    curr_ap.release();
    return;
  }

  if (!prev_sp || prev_sp->GetNumFrames(false) == 0) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("No previous frames, so use current frames...\n");
#endif
    // We either don't have any previous frames, or since we have more than
    // one current frame it means we have all the frames and can safely
    // replace our previous frames.
    prev_sp.reset(curr_ap.release());
    return;
  }

  const uint32_t num_curr_frames = curr_ap->GetNumFrames(false);

  if (num_curr_frames > 1) {
#if defined(DEBUG_STACK_FRAMES)
    s.PutCString(
        "We have more than one current frame, so use current frames...\n");
#endif
    // Since we have more than one current frame, it means we have all the
    // frames and can safely replace our previous frames.
    prev_sp.reset(curr_ap.release());

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nMerged:\n");
    prev_sp->Dump(&s);
#endif
    return;
  }

  StackFrameSP prev_frame_zero_sp(prev_sp->GetFrameAtIndex(0));
  StackFrameSP curr_frame_zero_sp(curr_ap->GetFrameAtIndex(0));
  StackID curr_stack_id(curr_frame_zero_sp->GetStackID());
  StackID prev_stack_id(prev_frame_zero_sp->GetStackID());

#if defined(DEBUG_STACK_FRAMES)
  const uint32_t num_prev_frames = prev_sp->GetNumFrames(false);
  s.Printf("\n%u previous frames with one current frame\n", num_prev_frames);
#endif

  // We have only a single current frame
  // Our previous stack frames only had a single frame as well...
  if (curr_stack_id == prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nPrevious frame #0 is same as current frame #0, merge the "
             "cached data\n");
#endif

    curr_frame_zero_sp->UpdateCurrentFrameFromPreviousFrame(
        *prev_frame_zero_sp);
    // prev_frame_zero_sp->UpdatePreviousFrameFromCurrentFrame
    // (*curr_frame_zero_sp);
    // prev_sp->SetFrameAtIndex (0, prev_frame_zero_sp);
  } else if (curr_stack_id < prev_stack_id) {
#if defined(DEBUG_STACK_FRAMES)
    s.Printf("\nCurrent frame #0 has a stack ID that is less than the previous "
             "frame #0, insert current frame zero in front of previous\n");
#endif
    prev_sp->m_frames.insert(prev_sp->m_frames.begin(), curr_frame_zero_sp);
  }

  curr_ap.release();

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\nMerged:\n");
  prev_sp->Dump(&s);
#endif
}

lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

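// Print up to num_frames frames starting at first_frame to strm, marking the
// selected frame with selected_frame_marker if one is given. Returns the
// number of frames actually displayed.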
size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}