blob: b67e9c29b42ba85cf9334ce55b5f566a7d71bd1b [file] [log] [blame]
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -07001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_GC_ROOT_H_
18#define ART_RUNTIME_GC_ROOT_H_
19
Mathieu Chartierbad02672014-08-25 13:08:22 -070020#include "base/macros.h"
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -070021#include "base/mutex.h" // For Locks::mutator_lock_.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -070022#include "mirror/object_reference.h"
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -070023
24namespace art {
25
namespace mirror {
class Object;
}  // namespace mirror

template <size_t kBufferSize>
class BufferedRootVisitor;

// Scaled by pointer width so that a default-sized BufferedRootVisitor keeps a
// 1 KiB on-stack buffer on both 32-bit and 64-bit targets (avoids frames that
// are too big on 64 bit). constexpr guarantees compile-time evaluation.
static constexpr size_t kDefaultBufferedRootCount = 1024 / sizeof(void*);
35
// Classification of a GC root's origin; values are stable and explicit so the
// mapping is obvious at a glance.
enum RootType {
  kRootUnknown = 0,
  kRootJNIGlobal = 1,
  kRootJNILocal = 2,
  kRootJavaFrame = 3,
  kRootNativeStack = 4,
  kRootStickyClass = 5,
  kRootThreadBlock = 6,
  kRootMonitorUsed = 7,
  kRootThreadObject = 8,
  kRootInternedString = 9,
  kRootDebugger = 10,
  kRootVMInternal = 11,
  kRootJNIMonitor = 12,
};
// Pretty-printer for RootType; defined elsewhere.
std::ostream& operator<<(std::ostream& os, const RootType& root_type);
52
Mathieu Chartierd3ed9a32015-04-10 14:23:35 -070053// Only used by hprof. thread_id_ and type_ are only used by hprof.
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -080054class RootInfo {
55 public:
56 // Thread id 0 is for non thread roots.
57 explicit RootInfo(RootType type, uint32_t thread_id = 0)
58 : type_(type), thread_id_(thread_id) {
59 }
Andreas Gampe758a8012015-04-03 21:28:42 -070060 RootInfo(const RootInfo&) = default;
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -080061 virtual ~RootInfo() {
62 }
63 RootType GetType() const {
64 return type_;
65 }
66 uint32_t GetThreadId() const {
67 return thread_id_;
68 }
69 virtual void Describe(std::ostream& os) const {
70 os << "Type=" << type_ << " thread_id=" << thread_id_;
71 }
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -070072 std::string ToString() const;
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -080073
74 private:
75 const RootType type_;
76 const uint32_t thread_id_;
77};
78
// Streams a RootInfo by delegating to its virtual Describe(), so subclass
// overrides are honored.
inline std::ostream& operator<<(std::ostream& os, const RootInfo& root_info) {
  root_info.Describe(os);
  return os;
}
83
84class RootVisitor {
85 public:
86 virtual ~RootVisitor() { }
87
Mathieu Chartierd3ed9a32015-04-10 14:23:35 -070088 // Single root version, not overridable.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -070089 ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
90 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
91 VisitRoots(&roots, 1, info);
92 }
93
Mathieu Chartierd3ed9a32015-04-10 14:23:35 -070094 // Single root version, not overridable.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -070095 ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
96 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
97 if (*roots != nullptr) {
98 VisitRoot(roots, info);
99 }
100 }
101
102 virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
103 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
104
105 virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
106 const RootInfo& info)
107 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
108};
109
110// Only visits roots one at a time, doesn't handle updating roots. Used when performance isn't
111// critical.
112class SingleRootVisitor : public RootVisitor {
113 private:
114 void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
115 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
116 for (size_t i = 0; i < count; ++i) {
117 VisitRoot(*roots[i], info);
118 }
119 }
120
121 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
122 const RootInfo& info) OVERRIDE
123 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
124 for (size_t i = 0; i < count; ++i) {
125 VisitRoot(roots[i]->AsMirrorPtr(), info);
126 }
127 }
128
129 virtual void VisitRoot(mirror::Object* root, const RootInfo& info) = 0;
130};
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -0800131
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700132template<class MirrorType>
Hiroshi Yamauchi9e47bfa2015-02-23 11:14:40 -0800133class GcRoot {
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700134 public:
135 template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800136 ALWAYS_INLINE MirrorType* Read() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700137
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700138 void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
139 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700140 DCHECK(!IsNull());
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700141 mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
142 visitor->VisitRoots(roots, 1u, info);
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700143 DCHECK(!IsNull());
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700144 }
145
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700146 void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
147 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -0800148 if (!IsNull()) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700149 VisitRoot(visitor, info);
Mathieu Chartiere34fa1d2015-01-14 14:55:47 -0800150 }
151 }
152
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700153 ALWAYS_INLINE mirror::CompressedReference<mirror::Object>* AddressWithoutBarrier() {
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700154 return &root_;
155 }
156
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700157 ALWAYS_INLINE bool IsNull() const {
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700158 // It's safe to null-check it without a read barrier.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700159 return root_.IsNull();
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700160 }
161
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700162 ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
163
164 private:
Mathieu Chartier9086b652015-04-14 09:35:18 -0700165 // Root visitors take pointers to root_ and place the min CompressedReference** arrays. We use a
166 // CompressedReference<mirror::Object> here since it violates strict aliasing requirements to
167 // cast CompressedReference<MirrorType>* to CompressedReference<mirror::Object>*.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700168 mutable mirror::CompressedReference<mirror::Object> root_;
169
170 template <size_t kBufferSize> friend class BufferedRootVisitor;
171};
172
173// Simple data structure for buffered root visiting to avoid virtual dispatch overhead. Currently
174// only for CompressedReferences since these are more common than the Object** roots which are only
175// for thread local roots.
176template <size_t kBufferSize>
177class BufferedRootVisitor {
178 public:
179 BufferedRootVisitor(RootVisitor* visitor, const RootInfo& root_info)
180 : visitor_(visitor), root_info_(root_info), buffer_pos_(0) {
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700181 }
182
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700183 ~BufferedRootVisitor() {
184 Flush();
185 }
186
187 template <class MirrorType>
188 ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
189 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
190 if (!root.IsNull()) {
191 VisitRoot(root);
192 }
193 }
194
195 template <class MirrorType>
196 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
197 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
198 if (!root->IsNull()) {
199 VisitRoot(root);
200 }
201 }
202
203 template <class MirrorType>
204 void VisitRoot(GcRoot<MirrorType>& root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
205 VisitRoot(root.AddressWithoutBarrier());
206 }
207
208 template <class MirrorType>
209 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
210 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
211 if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
212 Flush();
213 }
214 roots_[buffer_pos_++] = root;
215 }
216
217 void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
218 visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
219 buffer_pos_ = 0;
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700220 }
221
222 private:
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700223 RootVisitor* const visitor_;
224 RootInfo root_info_;
225 mirror::CompressedReference<mirror::Object>* roots_[kBufferSize];
226 size_t buffer_pos_;
Hiroshi Yamauchi94f7b492014-07-22 18:08:23 -0700227};
228
229} // namespace art
230
231#endif // ART_RUNTIME_GC_ROOT_H_