/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <limits>
#include <string>

#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

class MemMap;

// Maintain a table of indirect references. Used for local/global JNI references.
//
// The table contains object references, where the strong (local/global) references are part of the
// GC root set (but not the weak global references). When an object is added we return an
// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
// to be very fast.
//
// To be efficient for JNI local variable storage, we need to provide operations that allow us to
// operate on segments of the table, where segments are pushed and popped as if on a stack. For
// example, deletion of an entry should only succeed if it appears in the current segment, and we
// want to be able to strip off the current segment quickly when a method returns. Additions to the
// table must be made in the current segment even if space is available in an earlier area.
//
// A new segment is created when we call into native code from interpreted code, or when we handle
// the JNI PushLocalFrame function.
//
// The GC must be able to scan the entire table quickly.
//
// In summary, these must be very fast:
// - adding or removing a segment
// - adding references to a new segment
// - converting an indirect reference back to an Object
// These can be a little slower, but must still be pretty quick:
// - adding references to a "mature" segment
// - removing individual references
// - scanning the entire table straight through
//
// If there's more than one segment, we don't guarantee that the table will fill completely before
// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
//
// Only SynchronizedGet is synchronized.

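// A minimal sketch (not part of the runtime) of how a JNI transition might drive the segment
// stack, using the IndirectReferenceTable API declared below; `locals` and `obj` are hypothetical:
//
//   const IRTSegmentState cookie = locals.GetSegmentState();  // Push: remember the current top.
//   IndirectRef ref = locals.Add(cookie, obj);                // Add into the new segment.
//   // ... native code runs, possibly adding and removing more locals ...
//   locals.Remove(cookie, ref);                               // Only succeeds within this segment.
//   locals.SetSegmentState(cookie);                           // Pop: strip the segment on return.
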
// Indirect reference definition. This must be interchangeable with JNI's jobject, and it's
// convenient to let null be null, so we use void*.
//
// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
// global). We also reserve some bits to be used to detect stale indirect references: we put a
// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
// lookup iref1. A pattern based on object bits will miss this.
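//
// Layout sketch for a 64-bit release build, where kIRTPrevCount == 3 gives kSerialBits == 2 and
// kKindBits == 2 (debug builds use one more serial bit; the authoritative encoding lives in the
// helpers inside IndirectReferenceTable below):
//
//   bit: 63 ................................ 4 | 3    2 | 1    0 |
//        +------------------------------------+--------+--------+
//        |             table index            | serial |  kind  |
//        +------------------------------------+--------+--------+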
typedef void* IndirectRef;

// Indirect reference kind, used as the two low bits of IndirectRef.
//
// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,           // <<stack indirect reference table or invalid reference>>
  kLocal                = 1,           // <<local reference>>
  kGlobal               = 2,           // <<global reference>>
  kWeakGlobal           = 3,           // <<weak global reference>>
  kLastKind             = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);

// Table definition.
//
// For the global reference table, the expected common operations are adding a new entry and
// removing a recently-added entry (usually the most-recently-added entry). For JNI local
// references, the common operations are adding a new entry and removing an entire table segment.
//
// If we delete entries from the middle of the list, we will be left with "holes". We track the
// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
// or go slot-hunting.
//
// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
// deletion of an entry may reduce "top_index" by more than one.
//
// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
// "segment". The top is managed internally, and the bottom is passed in as a function argument.
// When we call a native method or push a local frame, the current top index gets pushed on, and
// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
// index, and the value stored in the previous frame becomes the new bottom.
//
// The number of holes in the current segment is cached within the table itself; otherwise we
// would have to carry both the bottom index and the hole count in the segment state, which would
// restrict the top index to 16 bits. To avoid emitting extra code in generated JNI transitions,
// which implicitly form segments, the code for adding and removing references must detect segment
// changes itself; helper fields are used for this detection.
//
// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
// determining the type and deleting the reference are more expensive because the table must be
// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
// the table when expanding it (so realloc() is out), and tricks like serial number checking to
// detect stale references aren't possible (though we may be able to get similar benefits with
// other approaches).
//
// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
// delete; it must be invalidated after a segment pop, so it might be worth only using it for JNI
// globals.
//
// TODO: may want completely different add/remove algorithms for global and local refs to improve
// performance. A large circular buffer might reduce the amortized cost of adding global
// references.

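// A worked trace of the hole behavior described above (indices are illustrative). Starting from
// an empty segment, add A, B, and C, then delete B and C:
//
//   Add(A)     // index 0, top_index = 1
//   Add(B)     // index 1, top_index = 2
//   Add(C)     // index 2, top_index = 3
//   Remove(B)  // middle of segment: leaves a hole at index 1, top_index stays 3
//   Remove(C)  // top-most entry: the hole below it is also reclaimed, top_index drops to 1
//
// A subsequent Add is then a trivial append at index 1; no slot-hunting is needed.
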
// The state of the current segment. We only store the index. Splitting it for index and hole
// count restricts the range too much.
struct IRTSegmentState {
  uint32_t top_index;
};

// Use as initial value for "cookie", and when table has only one segment.
static constexpr IRTSegmentState kIRTFirstSegment = { 0 };

// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// An entry contains multiple reference slots but only one active one; this helps us detect
// use-after-free errors, since the serial number stored in a stale indirect ref won't match.
static constexpr size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;

class IrtEntry {
 public:
  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Object>* GetReference() {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  const GcRoot<mirror::Object>* GetReference() const {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  uint32_t GetSerial() const {
    return serial_;
  }

  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  uint32_t serial_;
  GcRoot<mirror::Object> references_[kIRTPrevCount];
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
              "Unexpected sizeof(IrtEntry)");
static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
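
// A sketch of the use-after-free detection the rotating slots enable (`locals`, `cookie`, `obj`,
// and the serial values are hypothetical; the exact rotation is implemented in IrtEntry::Add):
//
//   IndirectRef iref1 = locals.Add(cookie, obj);  // entry serial is, say, 0; encoded into iref1
//   locals.Remove(cookie, iref1);
//   IndirectRef iref2 = locals.Add(cookie, obj);  // same slot reused; entry serial is now 1
//   locals.Get(iref1);                            // serial 0 != 1: the stale reference is caught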

class IrtIterator {
 public:
  IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
  }

  IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
    ++i_;
    return *this;
  }

  GcRoot<mirror::Object>* operator*() REQUIRES_SHARED(Locks::mutator_lock_) {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].GetReference();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  IrtEntry* const table_;
  size_t i_;
  const size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}

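// A sketch of iteration, e.g. while visiting roots (`irt` is a hypothetical table). Dereferencing
// yields the raw GcRoot slot, without a read barrier:
//
//   for (auto it = irt.begin(), end = irt.end(); it != end; ++it) {
//     GcRoot<mirror::Object>* root = *it;
//     // ... visit or update *root ...
//   }
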
class IndirectReferenceTable {
 public:
  enum class ResizableCapacity {
    kNo,
    kYes
  };

  // WARNING: Construction of the IndirectReferenceTable may fail.
  // error_msg must not be null. If error_msg is set by the constructor, then
  // construction has failed and the IndirectReferenceTable will be in an
  // invalid state. Use IsValid to check whether construction succeeded.
  IndirectReferenceTable(size_t max_count,
                         IndirectRefKind kind,
                         ResizableCapacity resizable,
                         std::string* error_msg);

  ~IndirectReferenceTable();

  /*
   * Checks whether construction of the IndirectReferenceTable succeeded.
   *
   * This object must only be used if IsValid() returns true. It is safe to
   * call IsValid from multiple threads without locking or other explicit
   * synchronization.
   */
  bool IsValid() const;
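
  // A construction sketch following the pattern described above (the size and error handling are
  // hypothetical, chosen for illustration):
  //
  //   std::string error_msg;
  //   IndirectReferenceTable globals(/* max_count */ 512,
  //                                  kGlobal,
  //                                  IndirectReferenceTable::ResizableCapacity::kNo,
  //                                  &error_msg);
  //   CHECK(globals.IsValid()) << "Failed to create global reference table: " << error_msg;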

  // Add a new entry. "obj" must be a valid non-null object reference. This function will
  // abort if the table is full (max entries reached, or expansion failed).
  IndirectRef Add(IRTSegmentState previous_state, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given an IndirectRef in the table, return the Object it refers to.
  //
  // This function may abort under error conditions.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> SynchronizedGet(IndirectRef iref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  // Updates an existing indirect reference to point to a new object.
  void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove an existing entry.
  //
  // If the entry is not between the current top index and the bottom index
  // specified by the cookie, we don't remove anything. This is the behavior
  // required by JNI's DeleteLocalRef function.
  //
  // Returns "false" if nothing was removed.
  bool Remove(IRTSegmentState previous_state, IndirectRef iref);
Elliott Hughes | 6c1a394 | 2011-08-17 15:00:06 -0700 | [diff] [blame] | 277 | |
Mathieu Chartier | 8778c52 | 2016-10-04 19:06:30 -0700 | [diff] [blame] | 278 | void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_); |
Elliott Hughes | 726079d | 2011-10-07 18:43:44 -0700 | [diff] [blame] | 279 | |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 280 | void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_); |
Elliott Hughes | 6c1a394 | 2011-08-17 15:00:06 -0700 | [diff] [blame] | 281 | |
Andreas Gampe | e03662b | 2016-10-13 17:12:56 -0700 | [diff] [blame] | 282 | // Return the #of entries in the entire table. This includes holes, and |
| 283 | // so may be larger than the actual number of "live" entries. |
Elliott Hughes | 6c1a394 | 2011-08-17 15:00:06 -0700 | [diff] [blame] | 284 | size_t Capacity() const { |
Andreas Gampe | e03662b | 2016-10-13 17:12:56 -0700 | [diff] [blame] | 285 | return segment_state_.top_index; |
Elliott Hughes | 6c1a394 | 2011-08-17 15:00:06 -0700 | [diff] [blame] | 286 | } |
| 287 | |
Hiroshi Yamauchi | ea2e1bd | 2014-06-18 13:47:35 -0700 | [diff] [blame] | 288 | // Note IrtIterator does not have a read barrier as it's used to visit roots. |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 289 | IrtIterator begin() { |
| 290 | return IrtIterator(table_, 0, Capacity()); |
Elliott Hughes | 6c1a394 | 2011-08-17 15:00:06 -0700 | [diff] [blame] | 291 | } |
| 292 | |
Mathieu Chartier | 02e2511 | 2013-08-14 16:14:24 -0700 | [diff] [blame] | 293 | IrtIterator end() { |
| 294 | return IrtIterator(table_, Capacity(), Capacity()); |
Elliott Hughes | 6c1a394 | 2011-08-17 15:00:06 -0700 | [diff] [blame] | 295 | } |
| 296 | |
Mathieu Chartier | bb87e0f | 2015-04-03 11:21:55 -0700 | [diff] [blame] | 297 | void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 298 | REQUIRES_SHARED(Locks::mutator_lock_); |
Elliott Hughes | 410c0c8 | 2011-09-01 17:58:25 -0700 | [diff] [blame] | 299 | |
Andreas Gampe | e03662b | 2016-10-13 17:12:56 -0700 | [diff] [blame] | 300 | IRTSegmentState GetSegmentState() const { |
| 301 | return segment_state_; |
Ian Rogers | ad25ac5 | 2011-10-04 19:13:33 -0700 | [diff] [blame] | 302 | } |
| 303 | |
Andreas Gampe | e03662b | 2016-10-13 17:12:56 -0700 | [diff] [blame] | 304 | void SetSegmentState(IRTSegmentState new_state); |
Ian Rogers | ad25ac5 | 2011-10-04 19:13:33 -0700 | [diff] [blame] | 305 | |
Andreas Gampe | 4d98c84 | 2015-12-09 15:14:04 -0800 | [diff] [blame] | 306 | static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) { |
| 307 | // Note: Currently segment_state_ is at offset 0. We're testing the expected value in |
| 308 | // jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that |
| 309 | // is not pointer-size-safe. |
| 310 | return Offset(0); |
Ian Rogers | dc51b79 | 2011-09-22 20:41:37 -0700 | [diff] [blame] | 311 | } |
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 312 | |
Mathieu Chartier | 91c2f0c | 2014-11-26 11:21:15 -0800 | [diff] [blame] | 313 | // Release pages past the end of the table that may have previously held references. |
Andreas Gampe | bdf7f1c | 2016-08-30 16:38:47 -0700 | [diff] [blame] | 314 | void Trim() REQUIRES_SHARED(Locks::mutator_lock_); |
Mathieu Chartier | 91c2f0c | 2014-11-26 11:21:15 -0800 | [diff] [blame] | 315 | |
Andreas Gampe | dc061d0 | 2016-10-24 13:19:37 -0700 | [diff] [blame] | 316 | // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind. |
| 317 | ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) { |
| 318 | return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref)); |
Elliott Hughes | 6c1a394 | 2011-08-17 15:00:06 -0700 | [diff] [blame] | 319 | } |
| 320 | |
Andreas Gampe | dc061d0 | 2016-10-24 13:19:37 -0700 | [diff] [blame] | 321 | private: |
| 322 | static constexpr size_t kSerialBits = MinimumBitsToStore(kIRTPrevCount); |
| 323 | static constexpr uint32_t kShiftedSerialMask = (1u << kSerialBits) - 1; |
| 324 | |
| 325 | static constexpr size_t kKindBits = MinimumBitsToStore( |
| 326 | static_cast<uint32_t>(IndirectRefKind::kLastKind)); |
| 327 | static constexpr uint32_t kKindMask = (1u << kKindBits) - 1; |
| 328 | |
| 329 | static constexpr uintptr_t EncodeIndex(uint32_t table_index) { |
| 330 | static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size"); |
| 331 | DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kSerialBits - kKindBits); |
| 332 | return (static_cast<uintptr_t>(table_index) << kKindBits << kSerialBits); |
| 333 | } |
| 334 | static constexpr uint32_t DecodeIndex(uintptr_t uref) { |
| 335 | return static_cast<uint32_t>((uref >> kKindBits) >> kSerialBits); |
| 336 | } |
| 337 | |
| 338 | static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) { |
| 339 | return static_cast<uintptr_t>(kind); |
| 340 | } |
| 341 | static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) { |
| 342 | return static_cast<IndirectRefKind>(uref & kKindMask); |
| 343 | } |
| 344 | |
| 345 | static constexpr uintptr_t EncodeSerial(uint32_t serial) { |
| 346 | DCHECK_LE(MinimumBitsToStore(serial), kSerialBits); |
| 347 | return serial << kKindBits; |
| 348 | } |
| 349 | static constexpr uint32_t DecodeSerial(uintptr_t uref) { |
| 350 | return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask; |
| 351 | } |
| 352 | |
| 353 | constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const { |
| 354 | DCHECK_LT(table_index, max_entries_); |
| 355 | return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kind_); |
| 356 | } |
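
  // A worked example for a release build (kIRTPrevCount == 3, so kSerialBits == 2 and
  // kKindBits == 2): EncodeIndirectRef(/* table_index */ 5, /* serial */ 2) on a kLocal table
  // yields (5 << 4) | (2 << 2) | 1 == 0x50 | 0x8 | 0x1 == 0x59. Conversely, DecodeIndex(0x59) == 5,
  // DecodeSerial(0x59) == 2, and DecodeIndirectRefKind(0x59) == kLocal.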

  static void ConstexprChecks();

  // Extract the table index from an indirect reference.
  ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
    return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
  }

  IndirectRef ToIndirectRef(uint32_t table_index) const {
    DCHECK_LT(table_index, max_entries_);
    uint32_t serial = table_[table_index].GetSerial();
    return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
  }

  // Resize the backing table. The new size must be larger than the current size.
  bool Resize(size_t new_size, std::string* error_msg);

  void RecoverHoles(IRTSegmentState from);

  // Abort if check_jni is not enabled. Otherwise, just log as an error.
  static void AbortIfNoCheckJNI(const std::string& msg);

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
  bool CheckEntry(const char*, IndirectRef, uint32_t) const;

  // Semi-public: read/written by JNI downcalls.
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Bottom of the stack. Do not directly access the object references
  // in this as they are roots. Use Get() that has a read barrier.
  IrtEntry* table_;
  // Bit mask, ORed into all irefs.
  const IndirectRefKind kind_;

  // Maximum number of entries allowed (modulo resizing).
  size_t max_entries_;

  // Some values to retain old behavior with holes. Description of the algorithm is in the .cc
  // file.
  // TODO: Consider other data structures for compact tables, e.g., free lists.
  size_t current_num_holes_;
  IRTSegmentState last_known_previous_state_;

  // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
  // responsibility to ensure thread-safety.
  ResizableCapacity resizable_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_