/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <memory>
#include <string>

#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

class MemMap;

/*
 * Maintain a table of indirect references. Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack. For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns. Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space. We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * Only SynchronizedGet is synchronized.
 */

/*
 * Indirect reference definition. This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global). Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used. (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table. This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get. It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1. A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;
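
// Illustrative example: with the encoding used by GetIndirectRefKind() and
// ToIndirectRef() below, an IndirectRef packs three fields into one word:
//
//   bits [1:0]   IndirectRefKind
//   bits [17:2]  16-bit table index
//   bits [31:20] serial number, used to detect stale references
//
// For instance, serial 5, index 0x12, and kind kLocal (1) would encode as
//   (5u << 20) | (0x12u << 2) | 1u == 0x00500049.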

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal = 1,                 // <<local reference>>
  kGlobal = 2,                // <<global reference>>
  kWeakGlobal = 3             // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);

/*
 * Determine what kind of indirect reference this is.
 */
static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry). For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move. If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes". We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed. Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment". The top is managed internally, and
 * the bottom is passed in as a function argument. When we call a native
 * method or push a local frame, the current top index gets pushed on, and
 * serves as the new bottom. When we pop a frame off, the value from the
 * stack becomes the new top index, and the value stored in the previous
 * frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack. Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot. Instead of getting a table and doing a lookup,
 * the lookup can be done instantly. Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance. A large circular buffer might
 * reduce the amortized cost of adding global references.
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
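
// Illustrative example: the "cookie" passed to Add()/Remove() below is just
// a snapshot of this union, so both fields round-trip through one uint32_t:
//
//   IRTSegmentState state;
//   state.parts.topIndex = 10;    // 10 entries in use
//   state.parts.numHoles = 2;     // 2 holes below the new bottom
//   uint32_t cookie = state.all;  // saved on segment push, restored on pop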

// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// An entry contains multiple reference slots but only one active one; this
// helps us detect use-after-free errors, since the serial stored in the
// indirect ref won't match.
static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
class IrtEntry {
 public:
  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Object>* GetReference() {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  uint32_t GetSerial() const {
    return serial_;
  }

  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  uint32_t serial_;
  GcRoot<mirror::Object> references_[kIRTPrevCount];
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
              "Unexpected sizeof(IrtEntry)");
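
// A plausible sketch (illustrative only; the real body lives in the .cc file)
// of how IrtEntry::Add() rotates the serial: advance to the next slot, wrap
// at kIRTPrevCount, and store the reference there, so a stale IndirectRef
// minted against the old serial no longer matches.
//
//   void IrtEntry::Add(ObjPtr<mirror::Object> obj) {
//     ++serial_;
//     if (serial_ == kIRTPrevCount) {
//       serial_ = 0;
//     }
//     references_[serial_] = GcRoot<mirror::Object>(obj);
//   }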

class IrtIterator {
 public:
  IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
  }

  IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
    ++i_;
    return *this;
  }

  GcRoot<mirror::Object>* operator*() REQUIRES_SHARED(Locks::mutator_lock_) {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].GetReference();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  IrtEntry* const table_;
  size_t i_;
  const size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}

class IndirectReferenceTable {
 public:
  /*
   * WARNING: Construction of the IndirectReferenceTable may fail.
   * error_msg must not be null. If the constructor sets *error_msg, then
   * construction has failed and the IndirectReferenceTable is in an
   * invalid state; use IsValid() to check before using the table.
   */
  IndirectReferenceTable(size_t max_count, IndirectRefKind kind, std::string* error_msg);

  ~IndirectReferenceTable();

  /*
   * Checks whether construction of the IndirectReferenceTable succeeded.
   *
   * This object must only be used if IsValid() returns true. It is safe to
   * call IsValid from multiple threads without locking or other explicit
   * synchronization.
   */
  bool IsValid() const;

  /*
   * Add a new entry. "obj" must be a valid non-nullptr object reference.
   *
   * Returns nullptr if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> SynchronizedGet(IndirectRef iref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  /*
   * Update an existing entry, making the indirect reference point to a new
   * object.
   */
  void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything. This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);

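  // Illustrative example of the cookie-based segment protocol (using only the
  // members declared in this class; GetSegmentState()/SetSegmentState() appear
  // below):
  //
  //   uint32_t cookie = irt.GetSegmentState();   // push a segment
  //   IndirectRef ref = irt.Add(cookie, obj);    // allocate in that segment
  //   ObjPtr<mirror::Object> o = irt.Get(ref);   // O(1) lookup
  //   irt.Remove(cookie, ref);                   // ok: ref is in this segment
  //   irt.SetSegmentState(cookie);               // pop the segment
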
  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table. This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  // Note IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }
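
  // Illustrative example: begin()/end() plus IrtIterator's operator++,
  // operator*, and operator!= make the table usable with a range-based for
  // loop when visiting roots:
  //
  //   for (GcRoot<mirror::Object>* root : irt) {
  //     // no read barrier; intended only for root visiting
  //   }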

  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
    // jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
    // is not pointer-size-safe.
    return Offset(0);
  }

  // Release pages past the end of the table that may have previously held references.
  void Trim() REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Extract the table index from an indirect reference.
  static uint32_t ExtractIndex(IndirectRef iref) {
    uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
    return (uref >> 2) & 0xffff;
  }

  /*
   * The object pointer itself is subject to relocation in some GC
   * implementations, so we shouldn't really be using it here.
   */
  IndirectRef ToIndirectRef(uint32_t tableIndex) const {
    DCHECK_LT(tableIndex, 65536U);
    uint32_t serialChunk = table_[tableIndex].GetSerial();
    uintptr_t uref = (serialChunk << 20) | (tableIndex << 2) | kind_;
    return reinterpret_cast<IndirectRef>(uref);
  }

  // Abort if check_jni is not enabled. Otherwise, just log as an error.
  static void AbortIfNoCheckJNI(const std::string& msg);

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by jni down calls */
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Bottom of the stack. Do not directly access the object references
  // in this as they are roots. Use Get(), which has a read barrier.
  IrtEntry* table_;
  /* bit mask, ORed into all irefs */
  const IndirectRefKind kind_;
  /* max #of entries allowed */
  const size_t max_entries_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_