| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 1 | //===-- FuncUnwinders.cpp ----------------------------------*- C++ -*-===// | 
|  | 2 | // | 
|  | 3 | //                     The LLVM Compiler Infrastructure | 
|  | 4 | // | 
|  | 5 | // This file is distributed under the University of Illinois Open Source | 
|  | 6 | // License. See LICENSE.TXT for details. | 
|  | 7 | // | 
|  | 8 | //===----------------------------------------------------------------------===// | 
|  | 9 |  | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 10 | #include "lldb/Core/AddressRange.h" | 
|  | 11 | #include "lldb/Core/Address.h" | 
| Greg Clayton | dc5eb69 | 2011-04-25 18:36:36 +0000 | [diff] [blame] | 12 | #include "lldb/Symbol/FuncUnwinders.h" | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 13 | #include "lldb/Symbol/DWARFCallFrameInfo.h" | 
| Greg Clayton | dc5eb69 | 2011-04-25 18:36:36 +0000 | [diff] [blame] | 14 | #include "lldb/Symbol/ObjectFile.h" | 
|  | 15 | #include "lldb/Symbol/UnwindPlan.h" | 
|  | 16 | #include "lldb/Symbol/UnwindTable.h" | 
| Greg Clayton | 31f1d2f | 2011-05-11 18:39:18 +0000 | [diff] [blame] | 17 | #include "lldb/Target/ABI.h" | 
| Greg Clayton | 1ac04c3 | 2012-02-21 00:09:25 +0000 | [diff] [blame] | 18 | #include "lldb/Target/ExecutionContext.h" | 
| Greg Clayton | 31f1d2f | 2011-05-11 18:39:18 +0000 | [diff] [blame] | 19 | #include "lldb/Target/Process.h" | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 20 | #include "lldb/Target/Thread.h" | 
|  | 21 | #include "lldb/Target/Target.h" | 
| Greg Clayton | 7be2542 | 2011-04-25 21:14:26 +0000 | [diff] [blame] | 22 | #include "lldb/Target/UnwindAssembly.h" | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 23 |  | 
|  | 24 | using namespace lldb; | 
|  | 25 | using namespace lldb_private; | 
|  | 26 |  | 
|  | 27 |  | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 28 | FuncUnwinders::FuncUnwinders | 
|  | 29 | ( | 
|  | 30 | UnwindTable& unwind_table, | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 31 | AddressRange range | 
|  | 32 | ) : | 
|  | 33 | m_unwind_table(unwind_table), | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 34 | m_range(range), | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 35 | m_mutex (Mutex::eMutexTypeNormal), | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 36 | m_unwind_plan_call_site_sp (), | 
|  | 37 | m_unwind_plan_non_call_site_sp (), | 
|  | 38 | m_unwind_plan_fast_sp (), | 
|  | 39 | m_unwind_plan_arch_default_sp (), | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 40 | m_tried_unwind_at_call_site (false), | 
|  | 41 | m_tried_unwind_at_non_call_site (false), | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 42 | m_tried_unwind_fast (false), | 
|  | 43 | m_tried_unwind_arch_default (false), | 
| Jason Molenda | 995cd3a | 2011-09-15 00:44:34 +0000 | [diff] [blame] | 44 | m_tried_unwind_arch_default_at_func_entry (false), | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 45 | m_first_non_prologue_insn() | 
|  | 46 | { | 
|  | 47 | } | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 48 |  | 
|  | 49 | FuncUnwinders::~FuncUnwinders () | 
|  | 50 | { | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 51 | } | 
|  | 52 |  | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 53 | UnwindPlanSP | 
| Jason Molenda | cabd1b7 | 2010-11-12 05:23:10 +0000 | [diff] [blame] | 54 | FuncUnwinders::GetUnwindPlanAtCallSite (int current_offset) | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 55 | { | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 56 | // Lock the mutex to ensure we can always give out the most appropriate | 
|  | 57 | // information. We want to make sure if someone requests a call site unwind | 
|  | 58 | // plan, that they get one and don't run into a race condition where one | 
|  | 59 | // thread has started to create the unwind plan and has put it into | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 60 | // m_unwind_plan_call_site_sp, and have another thread enter this function | 
|  | 61 | // and return the partially filled in m_unwind_plan_call_site_sp pointer. | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 62 | // We also want to make sure that we lock out other unwind plans from | 
|  | 63 | // being accessed until this one is done creating itself in case someone | 
|  | 64 | // had some code like: | 
|  | 65 | //  UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...) | 
|  | 66 | //  if (best_unwind_plan == NULL) | 
|  | 67 | //      best_unwind_plan = GetUnwindPlanAtNonCallSite (...) | 
|  | 68 | Mutex::Locker locker (m_mutex); | 
| Ed Maste | d4612ad | 2014-04-20 13:17:36 +0000 | [diff] [blame] | 69 | if (m_tried_unwind_at_call_site == false && m_unwind_plan_call_site_sp.get() == nullptr) | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 70 | { | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 71 | m_tried_unwind_at_call_site = true; | 
|  | 72 | // We have cases (e.g. with _sigtramp on Mac OS X) where the hand-written eh_frame unwind info for a | 
|  | 73 | // function does not cover the entire range of the function and so the FDE only lists a subset of the | 
|  | 74 | // address range.  If we try to look up the unwind info by the starting address of the function | 
|  | 75 | // (i.e. m_range.GetBaseAddress()) we may not find the eh_frame FDE.  We need to use the actual byte offset | 
|  | 76 | // into the function when looking it up. | 
|  | 77 |  | 
|  | 78 | if (m_range.GetBaseAddress().IsValid()) | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 79 | { | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 80 | Address current_pc (m_range.GetBaseAddress ()); | 
|  | 81 | if (current_offset != -1) | 
|  | 82 | current_pc.SetOffset (current_pc.GetOffset() + current_offset); | 
|  | 83 |  | 
|  | 84 | DWARFCallFrameInfo *eh_frame = m_unwind_table.GetEHFrameInfo(); | 
|  | 85 | if (eh_frame) | 
|  | 86 | { | 
| Greg Clayton | 31f1d2f | 2011-05-11 18:39:18 +0000 | [diff] [blame] | 87 | m_unwind_plan_call_site_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric)); | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 88 | if (!eh_frame->GetUnwindPlan (current_pc, *m_unwind_plan_call_site_sp)) | 
|  | 89 | m_unwind_plan_call_site_sp.reset(); | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 90 | } | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 91 | } | 
|  | 92 | } | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 93 | return m_unwind_plan_call_site_sp; | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 94 | } | 
|  | 95 |  | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 96 | UnwindPlanSP | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 97 | FuncUnwinders::GetUnwindPlanAtNonCallSite (Thread& thread) | 
|  | 98 | { | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 99 | // Lock the mutex to ensure we can always give out the most appropriate | 
|  | 100 | // information. We want to make sure if someone requests an unwind | 
|  | 101 | // plan, that they get one and don't run into a race condition where one | 
|  | 102 | // thread has started to create the unwind plan and has put it into | 
| Greg Clayton | e01e07b | 2013-04-18 18:10:51 +0000 | [diff] [blame] | 103 | // the unique pointer member variable, and have another thread enter this function | 
|  | 104 | // and return the partially filled pointer contained in the unique pointer. | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 105 | // We also want to make sure that we lock out other unwind plans from | 
|  | 106 | // being accessed until this one is done creating itself in case someone | 
|  | 107 | // had some code like: | 
|  | 108 | //  UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...) | 
|  | 109 | //  if (best_unwind_plan == NULL) | 
|  | 110 | //      best_unwind_plan = GetUnwindPlanAtNonCallSite (...) | 
|  | 111 | Mutex::Locker locker (m_mutex); | 
| Ed Maste | d4612ad | 2014-04-20 13:17:36 +0000 | [diff] [blame] | 112 | if (m_tried_unwind_at_non_call_site == false && m_unwind_plan_non_call_site_sp.get() == nullptr) | 
| Jason Molenda | ab4f192 | 2010-10-25 11:12:07 +0000 | [diff] [blame] | 113 | { | 
| Jason Molenda | ab35aa9 | 2014-05-23 01:48:10 +0000 | [diff] [blame] | 114 | UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler()); | 
|  | 115 | if (assembly_profiler_sp) | 
| Jason Molenda | 2cd21b8 | 2013-09-24 02:42:54 +0000 | [diff] [blame] | 116 | { | 
|  | 117 | m_unwind_plan_non_call_site_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric)); | 
| Jason Molenda | ab35aa9 | 2014-05-23 01:48:10 +0000 | [diff] [blame] | 118 | if (!assembly_profiler_sp->GetNonCallSiteUnwindPlanFromAssembly (m_range, thread, *m_unwind_plan_non_call_site_sp)) | 
| Jason Molenda | 2cd21b8 | 2013-09-24 02:42:54 +0000 | [diff] [blame] | 119 | m_unwind_plan_non_call_site_sp.reset(); | 
|  | 120 | } | 
| Jason Molenda | ab4f192 | 2010-10-25 11:12:07 +0000 | [diff] [blame] | 121 | } | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 122 | return m_unwind_plan_non_call_site_sp; | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 123 | } | 
|  | 124 |  | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 125 | UnwindPlanSP | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 126 | FuncUnwinders::GetUnwindPlanFastUnwind (Thread& thread) | 
|  | 127 | { | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 128 | // Lock the mutex to ensure we can always give out the most appropriate | 
|  | 129 | // information. We want to make sure if someone requests an unwind | 
|  | 130 | // plan, that they get one and don't run into a race condition where one | 
|  | 131 | // thread has started to create the unwind plan and has put it into | 
| Greg Clayton | e01e07b | 2013-04-18 18:10:51 +0000 | [diff] [blame] | 132 | // the unique pointer member variable, and have another thread enter this function | 
|  | 133 | // and return the partially filled pointer contained in the unique pointer. | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 134 | // We also want to make sure that we lock out other unwind plans from | 
|  | 135 | // being accessed until this one is done creating itself in case someone | 
|  | 136 | // had some code like: | 
|  | 137 | //  UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...) | 
|  | 138 | //  if (best_unwind_plan == NULL) | 
|  | 139 | //      best_unwind_plan = GetUnwindPlanAtNonCallSite (...) | 
|  | 140 | Mutex::Locker locker (m_mutex); | 
| Ed Maste | d4612ad | 2014-04-20 13:17:36 +0000 | [diff] [blame] | 141 | if (m_tried_unwind_fast == false && m_unwind_plan_fast_sp.get() == nullptr) | 
| Jason Molenda | ab4f192 | 2010-10-25 11:12:07 +0000 | [diff] [blame] | 142 | { | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 143 | m_tried_unwind_fast = true; | 
| Jason Molenda | ab35aa9 | 2014-05-23 01:48:10 +0000 | [diff] [blame] | 144 | UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler()); | 
|  | 145 | if (assembly_profiler_sp) | 
| Jason Molenda | 2cd21b8 | 2013-09-24 02:42:54 +0000 | [diff] [blame] | 146 | { | 
|  | 147 | m_unwind_plan_fast_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric)); | 
| Jason Molenda | ab35aa9 | 2014-05-23 01:48:10 +0000 | [diff] [blame] | 148 | if (!assembly_profiler_sp->GetFastUnwindPlan (m_range, thread, *m_unwind_plan_fast_sp)) | 
| Jason Molenda | 2cd21b8 | 2013-09-24 02:42:54 +0000 | [diff] [blame] | 149 | m_unwind_plan_fast_sp.reset(); | 
|  | 150 | } | 
| Jason Molenda | ab4f192 | 2010-10-25 11:12:07 +0000 | [diff] [blame] | 151 | } | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 152 | return m_unwind_plan_fast_sp; | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 153 | } | 
|  | 154 |  | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 155 | UnwindPlanSP | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 156 | FuncUnwinders::GetUnwindPlanArchitectureDefault (Thread& thread) | 
|  | 157 | { | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 158 | // Lock the mutex to ensure we can always give out the most appropriate | 
|  | 159 | // information. We want to make sure if someone requests an unwind | 
|  | 160 | // plan, that they get one and don't run into a race condition where one | 
|  | 161 | // thread has started to create the unwind plan and has put it into | 
| Greg Clayton | e01e07b | 2013-04-18 18:10:51 +0000 | [diff] [blame] | 162 | // the unique pointer member variable, and have another thread enter this function | 
|  | 163 | // and return the partially filled pointer contained in the unique pointer. | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 164 | // We also want to make sure that we lock out other unwind plans from | 
|  | 165 | // being accessed until this one is done creating itself in case someone | 
|  | 166 | // had some code like: | 
|  | 167 | //  UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...) | 
|  | 168 | //  if (best_unwind_plan == NULL) | 
|  | 169 | //      best_unwind_plan = GetUnwindPlanAtNonCallSite (...) | 
|  | 170 | Mutex::Locker locker (m_mutex); | 
| Ed Maste | d4612ad | 2014-04-20 13:17:36 +0000 | [diff] [blame] | 171 | if (m_tried_unwind_arch_default == false && m_unwind_plan_arch_default_sp.get() == nullptr) | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 172 | { | 
| Greg Clayton | 877aaa5 | 2011-01-08 21:19:00 +0000 | [diff] [blame] | 173 | m_tried_unwind_arch_default = true; | 
| Greg Clayton | b0848c5 | 2011-01-08 00:05:12 +0000 | [diff] [blame] | 174 | Address current_pc; | 
| Greg Clayton | d9e416c | 2012-02-18 05:35:26 +0000 | [diff] [blame] | 175 | ProcessSP process_sp (thread.CalculateProcess()); | 
|  | 176 | if (process_sp) | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 177 | { | 
| Greg Clayton | d9e416c | 2012-02-18 05:35:26 +0000 | [diff] [blame] | 178 | ABI *abi = process_sp->GetABI().get(); | 
| Greg Clayton | 31f1d2f | 2011-05-11 18:39:18 +0000 | [diff] [blame] | 179 | if (abi) | 
|  | 180 | { | 
|  | 181 | m_unwind_plan_arch_default_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric)); | 
|  | 182 | if (m_unwind_plan_arch_default_sp) | 
|  | 183 | abi->CreateDefaultUnwindPlan(*m_unwind_plan_arch_default_sp); | 
|  | 184 | } | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 185 | } | 
|  | 186 | } | 
|  | 187 |  | 
| Greg Clayton | e576ab2 | 2011-02-15 00:19:15 +0000 | [diff] [blame] | 188 | return m_unwind_plan_arch_default_sp; | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 189 | } | 
|  | 190 |  | 
| Jason Molenda | 995cd3a | 2011-09-15 00:44:34 +0000 | [diff] [blame] | 191 | UnwindPlanSP | 
|  | 192 | FuncUnwinders::GetUnwindPlanArchitectureDefaultAtFunctionEntry (Thread& thread) | 
|  | 193 | { | 
|  | 194 | // Lock the mutex to ensure we can always give out the most appropriate | 
|  | 195 | // information. We want to make sure if someone requests an unwind | 
|  | 196 | // plan, that they get one and don't run into a race condition where one | 
|  | 197 | // thread has started to create the unwind plan and has put it into | 
| Greg Clayton | e01e07b | 2013-04-18 18:10:51 +0000 | [diff] [blame] | 198 | // the unique pointer member variable, and have another thread enter this function | 
|  | 199 | // and return the partially filled pointer contained in the unique pointer. | 
| Jason Molenda | 995cd3a | 2011-09-15 00:44:34 +0000 | [diff] [blame] | 200 | // We also want to make sure that we lock out other unwind plans from | 
|  | 201 | // being accessed until this one is done creating itself in case someone | 
|  | 202 | // had some code like: | 
|  | 203 | //  UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...) | 
|  | 204 | //  if (best_unwind_plan == NULL) | 
|  | 205 | //      best_unwind_plan = GetUnwindPlanAtNonCallSite (...) | 
|  | 206 | Mutex::Locker locker (m_mutex); | 
| Ed Maste | d4612ad | 2014-04-20 13:17:36 +0000 | [diff] [blame] | 207 | if (m_tried_unwind_arch_default_at_func_entry == false && m_unwind_plan_arch_default_at_func_entry_sp.get() == nullptr) | 
| Jason Molenda | 995cd3a | 2011-09-15 00:44:34 +0000 | [diff] [blame] | 208 | { | 
|  | 209 | m_tried_unwind_arch_default_at_func_entry = true; | 
|  | 210 | Address current_pc; | 
| Greg Clayton | d9e416c | 2012-02-18 05:35:26 +0000 | [diff] [blame] | 211 | ProcessSP process_sp (thread.CalculateProcess()); | 
|  | 212 | if (process_sp) | 
| Jason Molenda | 995cd3a | 2011-09-15 00:44:34 +0000 | [diff] [blame] | 213 | { | 
| Greg Clayton | d9e416c | 2012-02-18 05:35:26 +0000 | [diff] [blame] | 214 | ABI *abi = process_sp->GetABI().get(); | 
| Jason Molenda | 995cd3a | 2011-09-15 00:44:34 +0000 | [diff] [blame] | 215 | if (abi) | 
|  | 216 | { | 
|  | 217 | m_unwind_plan_arch_default_at_func_entry_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric)); | 
|  | 218 | if (m_unwind_plan_arch_default_at_func_entry_sp) | 
|  | 219 | abi->CreateFunctionEntryUnwindPlan(*m_unwind_plan_arch_default_at_func_entry_sp); | 
|  | 220 | } | 
|  | 221 | } | 
|  | 222 | } | 
|  | 223 |  | 
| Jason Molenda | 2cd21b8 | 2013-09-24 02:42:54 +0000 | [diff] [blame] | 224 | return m_unwind_plan_arch_default_at_func_entry_sp; | 
| Jason Molenda | 995cd3a | 2011-09-15 00:44:34 +0000 | [diff] [blame] | 225 | } | 
|  | 226 |  | 
|  | 227 |  | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 228 | Address& | 
|  | 229 | FuncUnwinders::GetFirstNonPrologueInsn (Target& target) | 
|  | 230 | { | 
|  | 231 | if (m_first_non_prologue_insn.IsValid()) | 
|  | 232 | return m_first_non_prologue_insn; | 
| Greg Clayton | 1ac04c3 | 2012-02-21 00:09:25 +0000 | [diff] [blame] | 233 | ExecutionContext exe_ctx (target.shared_from_this(), false); | 
| Jason Molenda | ab35aa9 | 2014-05-23 01:48:10 +0000 | [diff] [blame] | 234 | UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler()); | 
|  | 235 | if (assembly_profiler_sp) | 
|  | 236 | if (assembly_profiler_sp) | 
|  | 237 | assembly_profiler_sp->FirstNonPrologueInsn (m_range, exe_ctx, m_first_non_prologue_insn); | 
| Jason Molenda | fbcb7f2 | 2010-09-10 07:49:16 +0000 | [diff] [blame] | 238 | return m_first_non_prologue_insn; | 
|  | 239 | } | 
|  | 240 |  | 
// Returns the start address of the function this FuncUnwinders covers
// (the base address of m_range).  The reference remains valid for the
// lifetime of this object.
const Address&
FuncUnwinders::GetFunctionStartAddress () const
{
    return m_range.GetBaseAddress();
}
|  | 246 |  | 
// Discards the assembly-derived unwind information in favor of the
// architecture default plan: if an architecture default plan is available
// and a call-site lookup has already been attempted, the cached call-site
// plan is replaced with the architecture default.
//
// NOTE(review): despite the "NonCallSite" in the name, this overwrites
// m_unwind_plan_call_site_sp, not m_unwind_plan_non_call_site_sp —
// confirm whether that is intentional.
// NOTE(review): m_mutex is not held while writing the member here, even
// though the GetUnwindPlan* methods guard it; taking the lock naively
// would self-deadlock because GetUnwindPlanArchitectureDefault() locks
// the same non-recursive (eMutexTypeNormal) mutex.
void
FuncUnwinders::InvalidateNonCallSiteUnwindPlan (lldb_private::Thread& thread)
{
    UnwindPlanSP arch_default = GetUnwindPlanArchitectureDefault (thread);
    if (arch_default && m_tried_unwind_at_call_site)
    {
        m_unwind_plan_call_site_sp = arch_default;
    }
}
| Jason Molenda | ab35aa9 | 2014-05-23 01:48:10 +0000 | [diff] [blame] | 256 |  | 
|  | 257 | lldb::UnwindAssemblySP | 
|  | 258 | FuncUnwinders::GetUnwindAssemblyProfiler () | 
|  | 259 | { | 
|  | 260 | UnwindAssemblySP assembly_profiler_sp; | 
|  | 261 | ArchSpec arch; | 
|  | 262 | if (m_unwind_table.GetArchitecture (arch)) | 
|  | 263 | { | 
|  | 264 | assembly_profiler_sp = UnwindAssembly::FindPlugin (arch); | 
|  | 265 | } | 
|  | 266 | return assembly_profiler_sp; | 
|  | 267 | } |