/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"

#include "compiler.h"
#include "oat/runtime/context.h"
#include "object.h"
#include "object_utils.h"
#include "thread_list.h"

namespace art {

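// Helper handed to ThreadList::RunWhileSuspended: its callback captures the
// target thread's internal stack trace while that thread is safely suspended.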
class StackGetter {
 public:
  StackGetter(JNIEnv* env, Thread* thread) : env_(env), thread_(thread), trace_(NULL) {
  }

  static void Callback(void* arg) {
    reinterpret_cast<StackGetter*>(arg)->Callback();
  }

  jobject GetTrace() {
    return trace_;
  }

 private:
  void Callback() {
    trace_ = thread_->CreateInternalStackTrace(env_);
  }

  JNIEnv* env_;
  Thread* thread_;
  jobject trace_;
};

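// Builds a stack trace for the given thread. The trace is captured via a
// callback run while the thread is suspended, so the stack cannot change
// mid-walk.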
jobject GetThreadStack(JNIEnv* env, Thread* thread) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  StackGetter stack_getter(env, thread);
  thread_list->RunWhileSuspended(thread, StackGetter::Callback, &stack_getter);
  return stack_getter.GetTrace();
}

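// Saves the current top-of-stack state into 'fragment' and links to it,
// leaving 'this' as a fresh, empty top fragment.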
void ManagedStack::PushManagedStackFragment(ManagedStack* fragment) {
  // Copy this top fragment into the given fragment.
  memcpy(fragment, this, sizeof(ManagedStack));
  // Clear this fragment, which has become the top.
  memset(this, 0, sizeof(ManagedStack));
  // Link our top fragment onto the given fragment.
  link_ = fragment;
}

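// Undoes PushManagedStackFragment: the given fragment must be the one
// currently linked directly below this top fragment.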
void ManagedStack::PopManagedStackFragment(const ManagedStack& fragment) {
  DCHECK(&fragment == link_);
  // Copy the given fragment back to the top.
  memcpy(this, &fragment, sizeof(ManagedStack));
}

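// Counts the object references held by every shadow frame in every linked
// fragment of this stack.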
size_t ManagedStack::NumShadowFrameReferences() const {
  size_t count = 0;
  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
         current_frame = current_frame->GetLink()) {
      count += current_frame->NumberOfReferences();
    }
  }
  return count;
}

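// Returns true if the given entry points into any shadow frame of any
// linked fragment.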
bool ManagedStack::ShadowFramesContain(Object** shadow_frame_entry) const {
  for (const ManagedStack* current_fragment = this; current_fragment != NULL;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
         current_frame = current_frame->GetLink()) {
      if (current_frame->Contains(shadow_frame_entry)) {
        return true;
      }
    }
  }
  return false;
}

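// The current dex pc is read straight from a shadow (interpreter) frame, or
// mapped back from the native pc of a quick (compiled) frame; 0 is returned
// when there is no managed frame at all.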
uint32_t StackVisitor::GetDexPc() const {
  if (cur_shadow_frame_ != NULL) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != NULL) {
    return GetMethod()->ToDexPC(AdjustQuickFramePcForDexPcComputation(cur_quick_frame_pc_));
  } else {
    return 0;
  }
}

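// Reads virtual register 'vreg' of the current frame. A register that the
// compiler promoted into a physical register (i.e. one present in the vmap
// table) is read from the saved context; anything else is read from its
// stack slot. Worked example of the spill-mask scan below: with
// spill_mask = 0b10110 and vmap_offset = 1, the loop stops after seeing the
// second set bit, which is bit 2, so the value comes from GetGPR(2).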
uint32_t StackVisitor::GetVReg(Method* m, int vreg) const {
  DCHECK(m == GetMethod());
  uint32_t core_spills = m->GetCoreSpillMask();
  const VmapTable vmap_table(m->GetVmapTableRaw());
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, vmap_offset)) {
    // Compute the register we need to load from the context.
    uint32_t spill_mask = core_spills;
    CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
    uint32_t matches = 0;
    uint32_t spill_shifts = 0;
    while (matches != (vmap_offset + 1)) {
      DCHECK_NE(spill_mask, 0u);
      matches += spill_mask & 1;  // Add 1 if the low bit is set.
      spill_mask >>= 1;
      spill_shifts++;
    }
    spill_shifts--;  // Wind back one as we want the last match.
    return GetGPR(spill_shifts);
  } else {
    const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
    DCHECK(code_item != NULL);  // Can't be NULL or how would we compile its instructions?
    uint32_t fp_spills = m->GetFpSpillMask();
    size_t frame_size = m->GetFrameSizeInBytes();
    return GetVReg(code_item, core_spills, fp_spills, frame_size, vreg);
  }
}

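// Writes 'new_value' into the stack slot of virtual register 'vreg' in the
// current quick frame. Registers promoted into physical registers are not
// handled yet, hence the fatal path below.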
void StackVisitor::SetVReg(Method* m, int vreg, uint32_t new_value) {
  DCHECK(m == GetMethod());
  const VmapTable vmap_table(m->GetVmapTableRaw());
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, vmap_offset)) {
    UNIMPLEMENTED(FATAL);
  }
  const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
  DCHECK(code_item != NULL);  // Can't be NULL or how would we compile its instructions?
  uint32_t core_spills = m->GetCoreSpillMask();
  uint32_t fp_spills = m->GetFpSpillMask();
  size_t frame_size = m->GetFrameSizeInBytes();
  int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
  byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
  *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
}

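// Reads a general-purpose register out of the context saved for this frame.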
uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  return context_->GetGPR(reg);
}

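// The return pc lives at a fixed, method-specific offset inside the quick
// frame; this reads it from that slot.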
uintptr_t StackVisitor::GetReturnPc() const {
  Method** sp = GetCurrentQuickFrame();
  CHECK(sp != NULL);
  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

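// Overwrites the return pc slot of the current quick frame, redirecting
// where this frame will return to.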
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  Method** sp = GetCurrentQuickFrame();
  CHECK(sp != NULL);
  byte* pc_addr = reinterpret_cast<byte*>(sp) + GetMethod()->GetReturnPcOffsetInBytes();
  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}

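// Measures stack depth by walking the whole stack (transitions included)
// with a visitor that simply counts the frames it sees.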
size_t StackVisitor::ComputeNumFrames() const {
  struct NumFramesVisitor : public StackVisitor {
    explicit NumFramesVisitor(const ManagedStack* stack,
                              const std::vector<TraceStackFrame>* trace_stack)
        : StackVisitor(stack, trace_stack), frames(0) {}

    virtual bool VisitFrame() {
      frames++;
      return true;
    }

    size_t frames;
  };

  NumFramesVisitor visitor(stack_start_, trace_stack_);
  visitor.WalkStack(true);
  return visitor.frames;
}

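// Debug-only consistency checks on the current frame: the method must really
// be a Method or Constructor, the pc must lie within the method's code, and
// the frame size and return pc offset must be plausible (a non-empty frame
// smaller than 1KiB).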
void StackVisitor::SanityCheckFrame() {
#ifndef NDEBUG
  Method* method = GetMethod();
  CHECK(method->GetClass() == Method::GetMethodClass() ||
        method->GetClass() == Method::GetConstructorClass());
  if (cur_quick_frame_ != NULL) {
    method->AssertPcIsWithinCode(AdjustQuickFramePcForDexPcComputation(cur_quick_frame_pc_));
    // Frame sanity.
    size_t frame_size = method->GetFrameSizeInBytes();
    CHECK_NE(frame_size, 0u);
    CHECK_LT(frame_size, 1024u);
    size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
    CHECK_LT(return_pc_offset, frame_size);
  }
#endif
}

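// The core stack walk. Fragments are visited from the most recent transition
// outwards; within a fragment, quick (compiled) frames are advanced by frame
// size and return pc, while shadow (interpreter) frames follow their link
// pointers. When method tracing is active, the real return pc is recovered
// from the trace's side stack. With include_transitions set, VisitFrame is
// also invoked once per transition between managed and native code.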
void StackVisitor::WalkStack(bool include_transitions) {
  bool method_tracing_active = Runtime::Current()->IsMethodTracingActive();
  uint32_t trace_stack_depth = 0;
  for (const ManagedStack* current_fragment = stack_start_; current_fragment != NULL;
       current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = current_fragment->GetTopQuickFramePc();
    if (cur_quick_frame_ != NULL) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == NULL);
      Method* method = *cur_quick_frame_;
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        if (context_ != NULL) {
          context_->FillCalleeSaves(*this);
        }
        size_t frame_size = method->GetFrameSizeInBytes();
        // Compute PC for next stack frame from return PC.
        size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
        byte* return_pc_addr = reinterpret_cast<byte*>(cur_quick_frame_) + return_pc_offset;
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
        if (UNLIKELY(method_tracing_active)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          // TODO: stop using include_transitions as a proxy for "is this the catch block visitor?".
          if (IsTraceExitPc(return_pc) && !include_transitions) {
            // TODO: unify trace and managed stack.
            TraceStackFrame trace_frame = GetTraceStackFrame(trace_stack_depth);
            trace_stack_depth++;
            CHECK(trace_frame.method_ == GetMethod()) << "Expected: " << PrettyMethod(method)
                << " Found: " << PrettyMethod(GetMethod());
            return_pc = trace_frame.return_pc_;
          }
        }
        cur_quick_frame_pc_ = return_pc;
        byte* next_frame = reinterpret_cast<byte*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<Method**>(next_frame);
        cur_depth_++;
        method = *cur_quick_frame_;
      } while (method != NULL);
    } else if (cur_shadow_frame_ != NULL) {
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != NULL);
    }
    cur_depth_++;
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
  }
}

}  // namespace art