/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATION_RECORD_H_
#define ART_RUNTIME_GC_ALLOCATION_RECORD_H_

#include <list>

#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

namespace art {

class ArtMethod;
class Thread;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {

class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {}

  int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_);

  ArtMethod* GetMethod() const {
    return method_;
  }

  void SetMethod(ArtMethod* m) {
    method_ = m;
  }

  uint32_t GetDexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

  bool operator==(const AllocRecordStackTraceElement& other) const {
    if (this == &other) return true;
    return method_ == other.method_ && dex_pc_ == other.dex_pc_;
  }

 private:
  ArtMethod* method_;
  uint32_t dex_pc_;
};

class AllocRecordStackTrace {
 public:
  static constexpr size_t kHashMultiplier = 17;

  explicit AllocRecordStackTrace(size_t max_depth)
      : tid_(0), depth_(0), stack_(new AllocRecordStackTraceElement[max_depth]) {}

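  // Note: the copy constructor below allocates exactly r.depth_ elements, so a
  // copy's depth must not be grown afterwards via SetDepth().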
  AllocRecordStackTrace(const AllocRecordStackTrace& r)
      : tid_(r.tid_), depth_(r.depth_), stack_(new AllocRecordStackTraceElement[r.depth_]) {
    for (size_t i = 0; i < depth_; ++i) {
      stack_[i] = r.stack_[i];
    }
  }

  ~AllocRecordStackTrace() {
    delete[] stack_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  void SetTid(pid_t t) {
    tid_ = t;
  }

  size_t GetDepth() const {
    return depth_;
  }

  void SetDepth(size_t depth) {
    depth_ = depth;
  }

  const AllocRecordStackTraceElement& GetStackElement(size_t index) const {
    DCHECK_LT(index, depth_);
    return stack_[index];
  }

  void SetStackElementAt(size_t index, ArtMethod* m, uint32_t dex_pc) {
    stack_[index].SetMethod(m);
    stack_[index].SetDexPc(dex_pc);
  }

  bool operator==(const AllocRecordStackTrace& other) const {
    if (this == &other) return true;
    if (tid_ != other.tid_) return false;
    if (depth_ != other.depth_) return false;
    for (size_t i = 0; i < depth_; ++i) {
      if (!(stack_[i] == other.stack_[i])) return false;
    }
    return true;
  }

 private:
  pid_t tid_;
  size_t depth_;
  AllocRecordStackTraceElement* const stack_;
};
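
// Example of how a stack walker might fill in a trace (a sketch; "frames" and
// "DexPcOf" are illustrative placeholders, not real ART APIs):
//   AllocRecordStackTrace trace(max_depth);
//   trace.SetTid(self->GetTid());
//   size_t depth = 0;
//   for (ArtMethod* m : frames) {  // At most max_depth frames.
//     trace.SetStackElementAt(depth++, m, DexPcOf(m));
//   }
//   trace.SetDepth(depth);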
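// Hash functor for allocation stack traces and their elements: field hashes
// are combined polynomially with kHashMultiplier, in the style of the familiar
// 31 * h + x hashCode() idiom.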
struct HashAllocRecordTypes {
  size_t operator()(const AllocRecordStackTraceElement& r) const {
    return std::hash<void*>()(reinterpret_cast<void*>(r.GetMethod())) *
        AllocRecordStackTrace::kHashMultiplier + std::hash<uint32_t>()(r.GetDexPc());
  }

  size_t operator()(const AllocRecordStackTrace& r) const {
    size_t depth = r.GetDepth();
    size_t result = r.GetTid() * AllocRecordStackTrace::kHashMultiplier + depth;
    for (size_t i = 0; i < depth; ++i) {
      result = result * AllocRecordStackTrace::kHashMultiplier + (*this)(r.GetStackElement(i));
    }
    return result;
  }
};

template <typename T> struct HashAllocRecordTypesPtr {
  size_t operator()(const T* r) const {
    if (r == nullptr) return 0;
    return HashAllocRecordTypes()(*r);
  }
};

template <typename T> struct EqAllocRecordTypesPtr {
  bool operator()(const T* r1, const T* r2) const {
    if (r1 == r2) return true;
    if (r1 == nullptr || r2 == nullptr) return false;
    return *r1 == *r2;
  }
};
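
// Because these functors hash and compare by pointee, pointer keys get
// deduplicated by value. For example, the stack trace deduplication mentioned
// in the TODO in AllocRecord below could use a container like this (a sketch,
// not existing code):
//   std::unordered_set<const AllocRecordStackTrace*,
//                      HashAllocRecordTypesPtr<AllocRecordStackTrace>,
//                      EqAllocRecordTypesPtr<AllocRecordStackTrace>> unique_traces;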

class AllocRecord {
 public:
  // All instances of AllocRecord should be managed by an instance of AllocRecordObjectMap.
  AllocRecord(size_t count, mirror::Class* klass, AllocRecordStackTrace* trace)
      : byte_count_(count), klass_(klass), trace_(trace) {}

  ~AllocRecord() {
    delete trace_;
  }

  size_t GetDepth() const {
    return trace_->GetDepth();
  }

  const AllocRecordStackTrace* GetStackTrace() const {
    return trace_;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  pid_t GetTid() const {
    return trace_->GetTid();
  }

  mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
    return klass_.Read();
  }

  const char* GetClassDescriptor(std::string* storage) const
      SHARED_REQUIRES(Locks::mutator_lock_);

  GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) {
    return klass_;
  }

  const AllocRecordStackTraceElement& StackElement(size_t index) const {
    return trace_->GetStackElement(index);
  }

 private:
  const size_t byte_count_;
  // klass_ may be either a strong or a weak root for the GC; see the comment
  // above the EntryList typedef in AllocRecordObjectMap below.
  GcRoot<mirror::Class> klass_;
  // TODO: Currently trace_ is like a std::unique_ptr,
  // but in the future, with deduplication, it could be a std::shared_ptr.
  const AllocRecordStackTrace* const trace_;
};

class AllocRecordObjectMap {
 public:
  // GcRoot<mirror::Object> pointers in the list are weak roots, and the last recent_record_max_
  // AllocRecord::klass_ pointers are strong roots (the remaining klass_ pointers are weak roots).
  // The last recent_record_max_ pairs in the list are always kept for DDMS's recent allocation
  // tracking, but the GcRoot<mirror::Object> pointers in those pairs can become null.
  // Both types of pointers need read barriers; do not access them directly.
  typedef std::list<std::pair<GcRoot<mirror::Object>, AllocRecord*>> EntryList;
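  // Example of reading entries through the read barriers (a sketch; assumes
  // the caller holds Locks::alloc_tracker_lock_ and the mutator lock, as
  // Begin()/End() below require):
  //   for (auto it = map->Begin(); it != map->End(); ++it) {
  //     mirror::Object* obj = it->first.Read();  // May be null once swept.
  //     AllocRecord* record = it->second;
  //     if (obj != nullptr) {
  //       // Use record->ByteCount(), record->GetClass(), ...
  //     }
  //   }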

  // "static" because it is part of a double-checked locking pattern: it must first check a bool
  // to make sure the AllocRecordObjectMap instance is not null.
  static void RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
                               size_t byte_count)
      REQUIRES(!Locks::alloc_tracker_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
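
  // At the allocation site this is typically guarded by a fast-path check of
  // that bool, roughly like the following (a sketch; the exact accessor for
  // the tracking flag is an assumption):
  //   if (UNLIKELY(heap->IsAllocTrackingEnabled())) {
  //     AllocRecordObjectMap::RecordAllocation(self, obj, klass, byte_count);
  //   }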

  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);

  AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_)
      : alloc_record_max_(kDefaultNumAllocRecords),
        recent_record_max_(kDefaultNumRecentRecords),
        max_stack_depth_(kDefaultAllocStackDepth),
        scratch_trace_(kMaxSupportedStackDepth),
        alloc_ddm_thread_id_(0),
        allow_new_record_(true),
        new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

  ~AllocRecordObjectMap();

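  // Inserts a new entry, evicting the oldest record (FIFO) once the list has
  // grown to alloc_record_max_ entries.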
  void Put(mirror::Object* obj, AllocRecord* record)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    if (entries_.size() == alloc_record_max_) {
      delete entries_.front().second;
      entries_.pop_front();
    }
    entries_.emplace_back(GcRoot<mirror::Object>(obj), record);
  }

  size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.size();
  }

  size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
    CHECK_LE(recent_record_max_, alloc_record_max_);
    size_t sz = entries_.size();
    return std::min(recent_record_max_, sz);
  }

  void VisitRoots(RootVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  // Allocation tracking can be enabled by the user between DisallowNewAllocationRecords() and
  // AllowNewAllocationRecords(), in which case new allocation records can be added even though
  // they should be disallowed. However, this is GC-safe because new objects are not processed
  // in the current GC cycle. The only downside of not handling this case is that such new
  // allocation records can be swept from the list; missing the first few records is acceptable
  // for the use case of enabling tracking via the DDMS button.
  void DisallowNewAllocationRecords()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void AllowNewAllocationRecords()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void BroadcastForNewAllocationRecords()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  // TODO: Is there a better way to hide the type of entries_?
  EntryList::iterator Begin()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.begin();
  }

  EntryList::iterator End()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.end();
  }

  EntryList::reverse_iterator RBegin()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rbegin();
  }

  EntryList::reverse_iterator REnd()
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rend();
  }
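
  // Example: iterating the recent records newest-first, as a DDMS-style dump
  // might (a sketch; assumes the locks required by RBegin()/REnd() are held):
  //   size_t count = map->GetRecentAllocationSize();
  //   for (auto it = map->RBegin(); count != 0u; ++it, --count) {
  //     const AllocRecord* record = it->second;
  //     // record->GetTid(), record->ByteCount(), record->StackElement(i), ...
  //   }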

 private:
  static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
  static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
  static constexpr size_t kDefaultAllocStackDepth = 16;
  static constexpr size_t kMaxSupportedStackDepth = 128;
  size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_);
  size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_);
  size_t max_stack_depth_ GUARDED_BY(Locks::alloc_tracker_lock_);
  AllocRecordStackTrace scratch_trace_ GUARDED_BY(Locks::alloc_tracker_lock_);
  pid_t alloc_ddm_thread_id_ GUARDED_BY(Locks::alloc_tracker_lock_);
  bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_);
  ConditionVariable new_record_condition_ GUARDED_BY(Locks::alloc_tracker_lock_);
  // See the comment above the EntryList typedef.
  EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);

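  // Reads the runtime's configured limits (record count, etc.) into the fields above; the exact
  // property names consulted (e.g. dalvik.vm.allocTrackerMax) are an implementation detail.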
  void SetProperties() REQUIRES(Locks::alloc_tracker_lock_);
};

}  // namespace gc
}  // namespace art
#endif  // ART_RUNTIME_GC_ALLOCATION_RECORD_H_