/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <string>

#include "base/logging.h"
#include "base/mutex.h"
#include "object_callbacks.h"
#include "offsets.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

/*
 * Maintain a table of indirect references.  Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack.  For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns.  Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space.  We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * None of the table functions are synchronized.
 */

/*
 * Indirect reference definition.  This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global).  Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used.  (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table.  This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get.  It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1.  A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;
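
// Encoding sketch (illustrative only; GetIndirectRefKind(), ExtractIndex() and
// ToIndirectRef() below are the authoritative masks and shifts):
//
//   uintptr_t uref = (serial << 20) | (tableIndex << 2) | kind;
//
// e.g. serial 1, index 2, kind kLocal packs to 0x100009, which decodes back as
// kind == (uref & 0x03) and index == ((uref >> 2) & 0xffff).  The kind occupies
// the low bits that an aligned Object* would leave zero, which is why zero is
// reserved as an invalid value.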

// Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress().
static mirror::Object* const kInvalidIndirectRefObject = reinterpret_cast<mirror::Object*>(0xdead4321);
static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast<mirror::Object*>(0xdead1234);

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kSirtOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal = 1,  // <<local reference>>
  kGlobal = 2,  // <<global reference>>
  kWeakGlobal = 3  // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);

/*
 * Determine what kind of indirect reference this is.
 */
static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}

/*
 * Extended debugging structure.  We keep a parallel array of these, one
 * per slot in the table.
 */
static const size_t kIRTPrevCount = 4;
struct IndirectRefSlot {
  uint32_t serial;
  const mirror::Object* previous[kIRTPrevCount];
};

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry).  For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move.  If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes".  We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed.  Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment".  The top is managed internally, and
 * the bottom is passed in as a function argument.  When we call a native
 * method or push a local frame, the current top index gets pushed on, and
 * serves as the new bottom.  When we pop a frame off, the value from the
 * stack becomes the new top index, and the value stored in the previous
 * frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack.  Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot.  Instead of getting a table and doing a lookup,
 * the lookup can be done instantly.  Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance.  A large circular buffer might
 * reduce the amortized cost of adding global references.
 *
 * TODO: if we can guarantee that the underlying storage doesn't move,
 * e.g. by using oversized mmap regions to handle expanding tables, we may
 * be able to avoid having to synchronize lookups.  Might make sense to
 * add a "synchronized lookup" call that takes the mutex as an argument,
 * and either locks or doesn't lock based on internal details.
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
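
// Worked example (illustrative only): after four adds and one delete from the
// middle of the current segment, the state would read
//
//   segment_state_.parts.topIndex == 4   // first unused slot
//   segment_state_.parts.numHoles == 1   // one dead slot below topIndex
//
// so only three entries are live.  Saving segment_state_.all before a native
// call captures both fields in one 32-bit "cookie"; restoring it on return
// re-establishes the old top index and hole count in a single store.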

class IrtIterator {
 public:
  explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
      : table_(table), i_(i), capacity_(capacity) {
    SkipNullsAndTombstones();
  }

  IrtIterator& operator++() {
    ++i_;
    SkipNullsAndTombstones();
    return *this;
  }

  mirror::Object** operator*() {
    return &table_[i_];
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  void SkipNullsAndTombstones() {
    // We skip NULLs and tombstones.  Clients don't want to see implementation details.
    while (i_ < capacity_ && (table_[i_] == NULL || table_[i_] == kClearedJniWeakGlobal)) {
      ++i_;
    }
  }

  mirror::Object** table_;
  size_t i_;
  size_t capacity_;
};

bool inline operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

bool inline operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}
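
// Illustrative use (a sketch, not part of the API; "irt" stands for any
// IndirectReferenceTable): walking every live entry via begin()/end()
// declared below.  Nulls and cleared weak globals are skipped by the
// iterator itself.
//
//   for (IrtIterator it = irt.begin(), end = irt.end(); it != end; ++it) {
//     mirror::Object** entry = *it;
//     // ... visit *entry ...
//   }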

class IndirectReferenceTable {
 public:
  IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind);

  ~IndirectReferenceTable();

  /*
   * Add a new entry.  "obj" must be a valid non-NULL object reference.
   *
   * Returns NULL if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // TODO: remove when we remove work_around_app_jni_bugs support.
  bool ContainsDirectPointer(mirror::Object* direct_pointer) const;

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything.  This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);

  void AssertEmpty();

  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table.  This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset() {
    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
  }

 private:
  /*
   * Extract the table index from an indirect reference.
   */
  static uint32_t ExtractIndex(IndirectRef iref) {
    uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
    return (uref >> 2) & 0xffff;
  }

  /*
   * The object pointer itself is subject to relocation in some GC
   * implementations, so we shouldn't really be using it here.
   */
  IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const {
    DCHECK_LT(tableIndex, 65536U);
    uint32_t serialChunk = slot_data_[tableIndex].serial;
    uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
    return reinterpret_cast<IndirectRef>(uref);
  }

  /*
   * Update extended debug info when an entry is added.
   *
   * We advance the serial number, invalidating any outstanding references to
   * this slot.
   */
  void UpdateSlotAdd(const mirror::Object* obj, int slot) {
    if (slot_data_ != NULL) {
      IndirectRefSlot* pSlot = &slot_data_[slot];
      pSlot->serial++;
      pSlot->previous[pSlot->serial % kIRTPrevCount] = obj;
    }
  }

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const;
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by jni down calls */
  IRTSegmentState segment_state_;

  /* bottom of the stack */
  mirror::Object** table_;
  /* bit mask, ORed into all irefs */
  IndirectRefKind kind_;
  /* extended debugging info */
  IndirectRefSlot* slot_data_;
  /* #of entries we have space for */
  size_t alloc_entries_;
  /* max #of entries allowed */
  size_t max_entries_;
};
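
// Typical lifetime of a local-reference segment (an illustrative sketch; the
// real callers live in the JNI entry/exit and PushLocalFrame/PopLocalFrame
// paths, and "locals"/"obj" are placeholders):
//
//   uint32_t cookie = locals.GetSegmentState();  // remember the segment bottom
//   IndirectRef ref = locals.Add(cookie, obj);   // additions go to this segment
//   mirror::Object* o = locals.Get(ref);         // O(1) lookup
//   locals.Remove(cookie, ref);                  // refuses entries outside the segment
//   locals.SetSegmentState(cookie);              // pop the whole segment on return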

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_