/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/stl_util.h"
#include "stack.h"

#ifdef __ANDROID__
#include "cutils/properties.h"
#endif

namespace art {
namespace gc {

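// Map the recorded dex pc back to a source line number using the method's debug info.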
int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}

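// Returns the name of the allocated class in descriptor form (e.g. "Ljava/lang/String;"),
// using *storage as backing storage where needed.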
const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  if (UNLIKELY(klass_.IsNull())) {
    return "null";
  } else {
    return klass_.Read()->GetDescriptor(storage);
  }
}

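// Read the tunables below from Android system properties, when set. A typical workflow (an
// assumed example, not part of this file) is to override them before enabling the tracker:
//   adb shell setprop dalvik.vm.allocTrackerMax 65536
//   adb shell setprop dalvik.vm.recentAllocMax 4096
//   adb shell setprop debug.allocTracker.stackDepth 16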
void AllocRecordObjectMap::SetProperties() {
#ifdef __ANDROID__
  // Check whether there's a system property overriding the max number of records.
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocMaxString
                 << "' --- invalid";
    } else {
      alloc_record_max_ = value;
      if (recent_record_max_ > value) {
        recent_record_max_ = value;
      }
    }
  }
  // Check whether there's a system property overriding the number of recent records.
  propertyName = "dalvik.vm.recentAllocMax";
  char recentAllocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, recentAllocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(recentAllocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- invalid";
    } else if (value > alloc_record_max_) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- should be less than " << alloc_record_max_;
    } else {
      recent_record_max_ = value;
    }
  }
  // Check whether there's a system property overriding the max depth of stack trace.
  propertyName = "debug.allocTracker.stackDepth";
  char stackDepthString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, stackDepthString, "") > 0) {
    char* end;
    size_t value = strtoul(stackDepthString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << stackDepthString
                 << "' --- invalid";
    } else if (value > kMaxSupportedStackDepth) {
      LOG(WARNING) << propertyName << " '" << stackDepthString << "' too large, using "
                   << kMaxSupportedStackDepth;
      max_stack_depth_ = kMaxSupportedStackDepth;
    } else {
      max_stack_depth_ = value;
    }
  }
#endif
}

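// entries_ owns its AllocRecord values, so delete them all here.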
AllocRecordObjectMap::~AllocRecordObjectMap() {
  STLDeleteValues(&entries_);
}

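// Visit the klass_ fields of the most recent records as strong roots, keeping those classes
// alive. Older records' classes are only treated as weak roots, in SweepClassObject() below.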
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the most recent recent_record_max_ allocation records in entries_ and mark their
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); count > 0 && it != end; count--, ++it) {
    buffered_visitor.VisitRootIfNonNull(it->second->GetClassGcRoot());
  }
}

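// Update a record's class GcRoot after GC, in case a moving collector relocated the class
// object. Classes are currently never unloaded, so IsMarked() must not return null here.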
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    SHARED_REQUIRES(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}

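// Sweep the map after GC: records whose tracked object died are deleted outright if they fall
// outside the most recent recent_record_max_ entries; otherwise the object reference is cleared
// to null but the record (and its stack trace) is kept. Moved objects get their new addresses.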
void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) records can be deleted.
  size_t delete_bound;
  if (entries_.size() <= recent_record_max_) {
    delete_bound = 0;
  } else {
    delete_bound = entries_.size() - recent_record_max_;
  }
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord* record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(record, visitor);
        ++it;
      } else {
        delete record;
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

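// With collectors that do not use read barriers, new records are blocked (Disallow) while the
// GC sweeps the map and re-enabled (Allow) afterwards. With the read-barrier (concurrent
// copying) collector, RecordAllocation() instead waits on the thread-local weak-ref-access
// flag, so the GC only needs to broadcast once that access is re-enabled.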
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  CHECK(kUseReadBarrier);
  new_record_condition_.Broadcast(Thread::Current());
}

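// Walks the allocating thread's stack, copying up to max_depth non-runtime frames (method and
// dex pc) into an AllocRecordStackTrace; the destructor records the final depth.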
struct AllocRecordStackVisitor : public StackVisitor {
  AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
      SHARED_REQUIRES(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        trace(trace_in),
        depth(0),
        max_depth(max) {}

  // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
  // confuses annotalysis.
  bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    if (depth >= max_depth) {
      return false;
    }
    ArtMethod* m = GetMethod();
    if (!m->IsRuntimeMethod()) {
      trace->SetStackElementAt(depth, m, GetDexPc());
      ++depth;
    }
    return true;
  }

  ~AllocRecordStackVisitor() {
    trace->SetDepth(depth);
  }

  AllocRecordStackTrace* trace;
  size_t depth;
  const size_t max_depth;
};

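// Toggle the tracker. Enabling allocates a fresh AllocRecordObjectMap, applies the property
// overrides, and instruments the fast-path allocation entry points so that every allocation
// reaches RecordAllocation(); disabling reverses this and drops all existing records.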
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = new AllocRecordObjectMap();
      CHECK(records != nullptr);
      records->SetProperties();
      std::string self_name;
      self->GetThreadName(self_name);
      if (self_name == "JDWP") {
        records->alloc_ddm_thread_id_ = self->GetTid();
      }
      records->scratch_trace_.SetDepth(records->max_stack_depth_);
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
      heap->SetAllocationRecords(records);
      heap->SetAllocTrackingEnabled(true);
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  } else {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      heap->SetAllocationRecords(nullptr);
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

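// Record a single allocation of byte_count bytes of type klass at address obj, together with
// the allocating thread's id and stack trace. Put() (defined elsewhere) is expected to keep the
// map within alloc_record_max_ entries, per the DCHECKs below.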
void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
                                            size_t byte_count) {
  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  AllocRecordObjectMap* records = heap->GetAllocationRecords();
  DCHECK(records != nullptr);

  // Do not record allocations made by the DDM thread.
  if (records->alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for the GC's sweeping to complete and allow new records.
  while (UNLIKELY((!kUseReadBarrier && !records->allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    records->new_record_condition_.WaitHoldingLocks(self);
  }

  DCHECK_LE(records->Size(), records->alloc_record_max_);

  // Get the stack trace.
  // Use a scope so that "visitor" is destroyed promptly, in order to set scratch_trace_'s depth_.
  {
    AllocRecordStackVisitor visitor(self, &records->scratch_trace_, records->max_stack_depth_);
    visitor.WalkStack();
  }
  records->scratch_trace_.SetTid(self->GetTid());
  AllocRecordStackTrace* trace = new AllocRecordStackTrace(records->scratch_trace_);

  // Fill in the basics.
  AllocRecord* record = new AllocRecord(byte_count, klass, trace);

  records->Put(obj, record);
  DCHECK_LE(records->Size(), records->alloc_record_max_);
}

}  // namespace gc
}  // namespace art