/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <string>

#include "base/logging.h"
#include "base/mutex.h"
#include "mem_map.h"
#include "object_callbacks.h"
#include "offsets.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

/*
 * Maintain a table of indirect references. Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack. For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns. Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space. We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * Only SynchronizedGet is synchronized.
 */
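
/*
 * Illustrative sketch (not part of this header): how a caller might bracket
 * a native call with a segment push/pop via the cookie. GetSegmentState(),
 * Add() and SetSegmentState() are declared on IndirectReferenceTable below;
 * the surrounding control flow is only an assumption about a typical call
 * site, not the actual JNI entry/exit code.
 *
 *   uint32_t cookie = locals.GetSegmentState();  // "push": remember the segment
 *   IndirectRef ref = locals.Add(cookie, obj);   // additions land in the new segment
 *   // ... run the native method, handing out 'ref' as a jobject ...
 *   locals.SetSegmentState(cookie);              // "pop": strip the segment on return
 */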

/*
 * Indirect reference definition. This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global). Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used. (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table. This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get. It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1. A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;

// Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress().
static mirror::Object* const kInvalidIndirectRefObject = reinterpret_cast<mirror::Object*>(0xdead4321);
static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast<mirror::Object*>(0xdead1234);

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal = 1,                 // <<local reference>>
  kGlobal = 2,                // <<global reference>>
  kWeakGlobal = 3             // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);

/*
 * Determine what kind of indirect reference this is.
 */
static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}
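
/*
 * Illustrative sketch (hypothetical value, matching the layout used by
 * ToIndirectRef() further down, not a fixed ABI guarantee): the kind lives in
 * bits 0-1, the table index in bits 2-17, and the serial number in the upper
 * bits. For example:
 *
 *   // serial 3, table index 5, local reference:
 *   uintptr_t raw = (3u << 20) | (5u << 2) | kLocal;    // 0x00300015
 *   IndirectRef iref = reinterpret_cast<IndirectRef>(raw);
 *   GetIndirectRefKind(iref);                           // kLocal
 *   (reinterpret_cast<uintptr_t>(iref) >> 2) & 0xffff;  // 5, as in ExtractIndex()
 */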

/*
 * Extended debugging structure. We keep a parallel array of these, one
 * per slot in the table.
 */
static const size_t kIRTPrevCount = 4;
struct IndirectRefSlot {
  uint32_t serial;
  const mirror::Object* previous[kIRTPrevCount];
};

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry). For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move. If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes". We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed. Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment". The top is managed internally, and
 * the bottom is passed in as a function argument. When we call a native
 * method or push a local frame, the current top index gets pushed on, and
 * serves as the new bottom. When we pop a frame off, the value from the
 * stack becomes the new top index, and the value stored in the previous
 * frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack. Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot. Instead of getting a table and doing a lookup,
 * the lookup can be done instantly. Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance. A large circular buffer might
 * reduce the amortized cost of adding global references.
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
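
/*
 * Illustrative sketch (layout assumption: the bit-fields occupy the low and
 * high halves of "all" in declaration order, as they do on the little-endian
 * targets ART runs on): a segment with topIndex 5 and one hole packs into a
 * single 32-bit cookie.
 *
 *   IRTSegmentState state;
 *   state.parts.topIndex = 5;
 *   state.parts.numHoles = 1;
 *   uint32_t cookie = state.all;  // 0x00010005 on such a layout
 */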

class IrtIterator {
 public:
  explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
      : table_(table), i_(i), capacity_(capacity) {
    SkipNullsAndTombstones();
  }

  IrtIterator& operator++() {
    ++i_;
    SkipNullsAndTombstones();
    return *this;
  }

  mirror::Object** operator*() {
    return &table_[i_];
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  void SkipNullsAndTombstones() {
    // We skip NULLs and tombstones. Clients don't want to see implementation details.
    while (i_ < capacity_ && (table_[i_] == NULL || table_[i_] == kClearedJniWeakGlobal)) {
      ++i_;
    }
  }

  mirror::Object** const table_;
  size_t i_;
  size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}

class IndirectReferenceTable {
 public:
  IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind);

  ~IndirectReferenceTable();

  /*
   * Add a new entry. "obj" must be a valid non-NULL object reference.
   *
   * Returns NULL if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                  IndirectRef iref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything. This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);

  void AssertEmpty();

  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table. This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset() {
    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
  }

 private:
  /*
   * Extract the table index from an indirect reference.
   */
  static uint32_t ExtractIndex(IndirectRef iref) {
    uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
    return (uref >> 2) & 0xffff;
  }

  /*
   * The object pointer itself is subject to relocation in some GC
   * implementations, so we shouldn't really be using it here.
   */
  IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const {
    DCHECK_LT(tableIndex, 65536U);
    uint32_t serialChunk = slot_data_[tableIndex].serial;
    uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
    return reinterpret_cast<IndirectRef>(uref);
  }

  /*
   * Update extended debug info when an entry is added.
   *
   * We advance the serial number, invalidating any outstanding references to
   * this slot.
   */
  void UpdateSlotAdd(const mirror::Object* obj, int slot) {
    if (slot_data_ != NULL) {
      IndirectRefSlot* pSlot = &slot_data_[slot];
      pSlot->serial++;
      pSlot->previous[pSlot->serial % kIRTPrevCount] = obj;
    }
  }
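
  /*
   * Illustrative sketch (hypothetical values, not code that exists here): the
   * per-slot serial number is what lets a stale reference be caught. Reusing a
   * slot bumps its serial, so an old iref that still encodes the previous
   * serial no longer matches:
   *
   *   slot_data_[5].serial == 3;   // iref1 for slot 5 encodes serial 3
   *   // delete iref1, then Add() reuses slot 5 for another object:
   *   UpdateSlotAdd(obj2, 5);      // serial becomes 4
   *   // iref1 still encodes serial 3, so the extra debugging checks below
   *   // have a way to detect it, and previous[] records what used to be there.
   */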

  // Abort if check_jni is not enabled.
  static void AbortIfNoCheckJNI();

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const;
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by JNI down calls */
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Mem map where we store the extended debugging info.
  std::unique_ptr<MemMap> slot_mem_map_;
  // Bottom of the stack. If this is a table of JNI weak globals, do not access
  // the object references in it directly, as they are weak roots; use Get(),
  // which performs the read barrier.
  mirror::Object** table_;
  /* bit mask, ORed into all irefs */
  IndirectRefKind kind_;
  /* extended debugging info */
  IndirectRefSlot* slot_data_;
  /* #of entries we have space for */
  size_t alloc_entries_;
  /* max #of entries allowed */
  size_t max_entries_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_