/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"

#include "compiler.h"
#include "oat/runtime/context.h"
#include "object.h"
#include "object_utils.h"
#include "thread_list.h"

namespace art {

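// Helper passed to ThreadList::RunWhileSuspended: its callback runs while the
// target thread is suspended and captures that thread's stack trace.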
class StackGetter {
 public:
  StackGetter(const ScopedJniThreadState& ts, Thread* thread)
      : ts_(ts), thread_(thread), trace_(NULL) {
  }

  static void Callback(void* arg) {
    reinterpret_cast<StackGetter*>(arg)->Callback();
  }

  jobject GetTrace() {
    return trace_;
  }

 private:
  void Callback() {
    trace_ = thread_->CreateInternalStackTrace(ts_);
  }

  const ScopedJniThreadState& ts_;
  Thread* const thread_;
  jobject trace_;
};

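// Returns a thread's internal stack trace as a jobject; the trace is built by
// StackGetter::Callback while |thread| is held suspended by the thread list.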
jobject GetThreadStack(const ScopedJniThreadState& ts, Thread* thread) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  StackGetter stack_getter(ts, thread);
  thread_list->RunWhileSuspended(thread, StackGetter::Callback, &stack_getter);
  return stack_getter.GetTrace();
}

void ManagedStack::PushManagedStackFragment(ManagedStack* fragment) {
  // Copy this top fragment into given fragment.
  memcpy(fragment, this, sizeof(ManagedStack));
  // Clear this fragment, which has become the top.
  memset(this, 0, sizeof(ManagedStack));
  // Link our top fragment onto the given fragment.
  link_ = fragment;
}

void ManagedStack::PopManagedStackFragment(const ManagedStack& fragment) {
  DCHECK(&fragment == link_);
  // Copy this given fragment back to the top.
  memcpy(this, &fragment, sizeof(ManagedStack));
}

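// Sums the reference slots of every shadow frame in every linked fragment.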
size_t ManagedStack::NumShadowFrameReferences() const {
  size_t count = 0;
  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
         current_frame = current_frame->GetLink()) {
      count += current_frame->NumberOfReferences();
    }
  }
  return count;
}

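// Returns true if |shadow_frame_entry| points into any shadow frame of any
// linked fragment.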
bool ManagedStack::ShadowFramesContain(Object** shadow_frame_entry) const {
  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
         current_frame = current_frame->GetLink()) {
      if (current_frame->Contains(shadow_frame_entry)) {
        return true;
      }
    }
  }
  return false;
}

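// The current dex pc: shadow frames record it directly, quick frames map the
// native pc back through Method::ToDexPC, and a bare transition (neither kind
// of frame) has no dex pc, so 0 is returned.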
uint32_t StackVisitor::GetDexPc() const {
  if (cur_shadow_frame_ != NULL) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != NULL) {
    return GetMethod()->ToDexPC(AdjustQuickFramePcForDexPcComputation(cur_quick_frame_pc_));
  } else {
    return 0;
  }
}

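// Reads virtual register |vreg| for the quick frame's method. A vreg promoted
// into the context lives in a callee-saved register: the vmap table gives its
// ordinal among the saved registers, and the loop below finds the register
// number of the (vmap_offset + 1)th set bit in the spill mask. For example,
// with spill mask 0b1100 and vmap_offset 1, the walk stops after two matches
// with spill_shifts == 4; winding back one yields register 3. Unpromoted
// vregs are read out of the frame's memory at a computed offset.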
uint32_t StackVisitor::GetVReg(Method* m, int vreg) const {
  DCHECK(context_ != NULL);  // You can't reliably read registers without a context.
  DCHECK(m == GetMethod());
  uint32_t core_spills = m->GetCoreSpillMask();
  const VmapTable vmap_table(m->GetVmapTableRaw());
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, vmap_offset)) {
    // Compute the register we need to load from the context.
    uint32_t spill_mask = core_spills;
    CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
    uint32_t matches = 0;
    uint32_t spill_shifts = 0;
    while (matches != (vmap_offset + 1)) {
      DCHECK_NE(spill_mask, 0u);
      matches += spill_mask & 1;  // Add 1 if the low bit is set.
      spill_mask >>= 1;
      spill_shifts++;
    }
    spill_shifts--;  // Wind back one as we want the last match.
    return GetGPR(spill_shifts);
  } else {
    const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
    DCHECK(code_item != NULL);  // Can't be NULL or how would we compile its instructions?
    uint32_t fp_spills = m->GetFpSpillMask();
    size_t frame_size = m->GetFrameSizeInBytes();
    return GetVReg(code_item, core_spills, fp_spills, frame_size, vreg);
  }
}

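// Writes virtual register |vreg| for the quick frame's method. Only vregs
// homed on the stack are handled; writing a vreg promoted into the context
// is unimplemented and fatal.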
void StackVisitor::SetVReg(Method* m, int vreg, uint32_t new_value) {
  DCHECK(context_ != NULL);  // You can't reliably write registers without a context.
  DCHECK(m == GetMethod());
  const VmapTable vmap_table(m->GetVmapTableRaw());
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, vmap_offset)) {
    UNIMPLEMENTED(FATAL);
  }
  const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
  DCHECK(code_item != NULL);  // Can't be NULL or how would we compile its instructions?
  uint32_t core_spills = m->GetCoreSpillMask();
  uint32_t fp_spills = m->GetFpSpillMask();
  size_t frame_size = m->GetFrameSizeInBytes();
  int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
  byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
  *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  return context_->GetGPR(reg);
}

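// The return pc is saved within the quick frame at a method-specific offset,
// so both accessors compute its address from the frame pointer.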
uintptr_t StackVisitor::GetReturnPc() const {
  Method** sp = GetCurrentQuickFrame();
  CHECK(sp != NULL);
  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  Method** sp = GetCurrentQuickFrame();
  CHECK(sp != NULL);
  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}

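// Counts frames by walking the whole stack, transitions included, with a
// minimal visitor that just increments a counter.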
size_t StackVisitor::ComputeNumFrames() const {
  struct NumFramesVisitor : public StackVisitor {
    explicit NumFramesVisitor(const ManagedStack* stack,
                              const std::vector<TraceStackFrame>* trace_stack)
        : StackVisitor(stack, trace_stack, NULL), frames(0) {}

    virtual bool VisitFrame() {
      frames++;
      return true;
    }

    size_t frames;
  };

  NumFramesVisitor visitor(stack_start_, trace_stack_);
  visitor.WalkStack(true);
  return visitor.frames;
}

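// Debug-only consistency checks on the current frame: the method must really
// be a Method or Constructor, the pc must lie within its code, and the frame
// size and return pc offset must be plausible.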
void StackVisitor::SanityCheckFrame() {
#ifndef NDEBUG
  Method* method = GetMethod();
  CHECK(method->GetClass() == Method::GetMethodClass() ||
        method->GetClass() == Method::GetConstructorClass());
  if (cur_quick_frame_ != NULL) {
    method->AssertPcIsWithinCode(AdjustQuickFramePcForDexPcComputation(cur_quick_frame_pc_));
    // Frame sanity.
    size_t frame_size = method->GetFrameSizeInBytes();
    CHECK_NE(frame_size, 0u);
    CHECK_LT(frame_size, 1024u);
    size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
    CHECK_LT(return_pc_offset, frame_size);
  }
#endif
}

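// Walks every fragment of the managed stack. Within a fragment, quick frames
// are chained implicitly: the next frame sits frame_size bytes up and its pc
// is the saved return pc, while shadow frames follow explicit links. When
// |include_transitions| is set, VisitFrame is also called for the transition
// between fragments.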
void StackVisitor::WalkStack(bool include_transitions) {
  bool method_tracing_active = Runtime::Current()->IsMethodTracingActive();
  uint32_t trace_stack_depth = 0;
  for (const ManagedStack* current_fragment = stack_start_; current_fragment != NULL;
       current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = current_fragment->GetTopQuickFramePc();
    if (cur_quick_frame_ != NULL) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == NULL);
      Method* method = *cur_quick_frame_;
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        if (context_ != NULL) {
          context_->FillCalleeSaves(*this);
        }
        size_t frame_size = method->GetFrameSizeInBytes();
        // Compute PC for next stack frame from return PC.
        size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
        byte* return_pc_addr = reinterpret_cast<byte*>(cur_quick_frame_) + return_pc_offset;
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
        if (UNLIKELY(method_tracing_active)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          // TODO: stop using include_transitions as a proxy for is this the catch block visitor.
          if (IsTraceExitPc(return_pc) && !include_transitions) {
            // TODO: unify trace and managed stack.
            TraceStackFrame trace_frame = GetTraceStackFrame(trace_stack_depth);
            trace_stack_depth++;
            CHECK(trace_frame.method_ == GetMethod()) << "Expected: " << PrettyMethod(method)
                                                      << " Found: " << PrettyMethod(GetMethod());
            return_pc = trace_frame.return_pc_;
          }
        }
        cur_quick_frame_pc_ = return_pc;
        byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<Method**>(next_frame);
        cur_depth_++;
        method = *cur_quick_frame_;
      } while (method != NULL);
    } else if (cur_shadow_frame_ != NULL) {
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != NULL);
    }
    cur_depth_++;
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
  }
}

}  // namespace art