/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <limits>
#include <string>

#include <android-base/logging.h>

#include "base/bit_utils.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

class MemMap;

// Maintain a table of indirect references. Used for local/global JNI references.
//
// The table contains object references, where the strong (local/global) references are part of the
// GC root set (but not the weak global references). When an object is added we return an
// IndirectRef that is not a valid pointer but can be used to find the original value in O(1) time.
// Conversions to and from indirect references are performed on upcalls and downcalls, so they need
// to be very fast.
//
// To be efficient for JNI local variable storage, we need to provide operations that allow us to
// operate on segments of the table, where segments are pushed and popped as if on a stack. For
// example, deletion of an entry should only succeed if it appears in the current segment, and we
// want to be able to strip off the current segment quickly when a method returns. Additions to the
// table must be made in the current segment even if space is available in an earlier area.
//
// A new segment is created when we call into native code from interpreted code, or when we handle
// the JNI PushLocalFrame function.
//
// The GC must be able to scan the entire table quickly.
//
// In summary, these must be very fast:
//  - adding or removing a segment
//  - adding references to a new segment
//  - converting an indirect reference back to an Object
// These can be a little slower, but must still be pretty quick:
//  - adding references to a "mature" segment
//  - removing individual references
//  - scanning the entire table straight through
//
// If there's more than one segment, we don't guarantee that the table will fill completely before
// we fail due to lack of space. We do ensure that the current segment will pack tightly, which
// should satisfy JNI requirements (e.g. EnsureLocalCapacity).
//
// Only SynchronizedGet is synchronized.

// Indirect reference definition. This must be interchangeable with JNI's jobject, and it's
// convenient to let null be null, so we use void*.
//
// We need a (potentially) large table index and a 2-bit reference type (global, local, weak
// global). We also reserve some bits to be used to detect stale indirect references: we put a
// serial number in the extra bits, and keep a copy of the serial number in the table. This requires
// more memory and additional memory accesses on add/get, but is moving-GC safe. It will catch
// additional problems, e.g.: create iref1 for obj, delete iref1, create iref2 for same obj,
// lookup iref1. A pattern based on object bits will miss this.
typedef void* IndirectRef;
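
// Illustrative bit layout (a sketch for a 64-bit release build, where kIRTPrevCount == 3
// yields two serial bits and there are two kind bits; the real widths are derived from the
// constants inside IndirectReferenceTable below):
//
//   bits 63..4: table index
//   bits  3..2: serial number
//   bits  1..0: IndirectRefKind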

// Indirect reference kind, used as the two low bits of IndirectRef.
//
// For convenience these match up with enum jobjectRefType from jni.h.
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,           // <<stack indirect reference table or invalid reference>>
  kLocal                = 1,           // <<local reference>>
  kGlobal               = 2,           // <<global reference>>
  kWeakGlobal           = 3,           // <<weak global reference>>
  kLastKind             = kWeakGlobal
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);
const char* GetIndirectRefKindString(const IndirectRefKind& kind);
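
// Classifying an opaque reference is a static operation (a sketch):
//
//   IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
//   if (kind == kWeakGlobal) { ... }  // e.g. route deletion to the weak-global table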
Elliott Hughes6c1a3942011-08-17 15:00:06 -0700103
Andreas Gampee03662b2016-10-13 17:12:56 -0700104// Table definition.
105//
106// For the global reference table, the expected common operations are adding a new entry and
107// removing a recently-added entry (usually the most-recently-added entry). For JNI local
108// references, the common operations are adding a new entry and removing an entire table segment.
109//
110// If we delete entries from the middle of the list, we will be left with "holes". We track the
111// number of holes so that, when adding new elements, we can quickly decide to do a trivial append
112// or go slot-hunting.
113//
114// When the top-most entry is removed, any holes immediately below it are also removed. Thus,
115// deletion of an entry may reduce "top_index" by more than one.
116//
117// To get the desired behavior for JNI locals, we need to know the bottom and top of the current
118// "segment". The top is managed internally, and the bottom is passed in as a function argument.
119// When we call a native method or push a local frame, the current top index gets pushed on, and
120// serves as the new bottom. When we pop a frame off, the value from the stack becomes the new top
121// index, and the value stored in the previous frame becomes the new bottom.
122//
123// Holes are being locally cached for the segment. Otherwise we'd have to pass bottom index and
124// number of holes, which restricts us to 16 bits for the top index. The value is cached within the
125// table. To avoid code in generated JNI transitions, which implicitly form segments, the code for
126// adding and removing references needs to detect the change of a segment. Helper fields are used
127// for this detection.
128//
129// Common alternative implementation: make IndirectRef a pointer to the actual reference slot.
130// Instead of getting a table and doing a lookup, the lookup can be done instantly. Operations like
131// determining the type and deleting the reference are more expensive because the table must be
132// hunted for (i.e. you have to do a pointer comparison to see which table it's in), you can't move
133// the table when expanding it (so realloc() is out), and tricks like serial number checking to
134// detect stale references aren't possible (though we may be able to get similar benefits with other
135// approaches).
136//
137// TODO: consider a "lastDeleteIndex" for quick hole-filling when an add immediately follows a
138// delete; must invalidate after segment pop might be worth only using it for JNI globals.
139//
140// TODO: may want completely different add/remove algorithms for global and local refs to improve
141// performance. A large circular buffer might reduce the amortized cost of adding global
142// references.
Elliott Hughes6c1a3942011-08-17 15:00:06 -0700143
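// A sketch of the segment protocol described above (illustrative only; in the runtime the
// pushes and pops are performed by the generated JNI transition code):
//
//   const IRTSegmentState cookie = locals.GetSegmentState();  // "push": remember the bottom
//   IndirectRef r1 = locals.Add(cookie, obj1, &error_msg);
//   IndirectRef r2 = locals.Add(cookie, obj2, &error_msg);
//   locals.Remove(cookie, r1);       // ok: r1 is within the current segment
//   locals.SetSegmentState(cookie);  // "pop": discards r2 and any holes in O(1)
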
// The state of the current segment. We only store the index. Splitting it for index and hole
// count restricts the range too much.
struct IRTSegmentState {
  uint32_t top_index;
};

// Use as initial value for "cookie", and when table has only one segment.
static constexpr IRTSegmentState kIRTFirstSegment = { 0 };

// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// An entry contains multiple reference slots but only one active one; this helps us detect
// use-after-free errors, since the serial stored in the indirect ref won't match.
static constexpr size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;

class IrtEntry {
 public:
  void Add(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Object>* GetReference() {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  const GcRoot<mirror::Object>* GetReference() const {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }

  uint32_t GetSerial() const {
    return serial_;
  }

  void SetReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  uint32_t serial_;
  GcRoot<mirror::Object> references_[kIRTPrevCount];
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
              "Unexpected sizeof(IrtEntry)");
static_assert(IsPowerOfTwo(sizeof(IrtEntry)), "Unexpected sizeof(IrtEntry)");
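
// How the rotating serial catches stale references (a sketch; "irt" and "cookie" as in the
// segment example above):
//
//   IndirectRef r1 = irt.Add(cookie, obj, &error_msg);  // slot i, serial s encoded in r1
//   irt.Remove(cookie, r1);
//   IndirectRef r2 = irt.Add(cookie, obj, &error_msg);  // may reuse slot i with serial s + 1
//   irt.Get(r1);  // serial in r1 no longer matches the slot, so the lookup is flagged as stale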

class IrtIterator {
 public:
  IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
    // capacity_ is only used on some targets; suppress the unused-variable warning elsewhere.
    UNUSED(capacity_);
  }

  IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
    ++i_;
    return *this;
  }

  GcRoot<mirror::Object>* operator*() REQUIRES_SHARED(Locks::mutator_lock_) {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].GetReference();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  IrtEntry* const table_;
  size_t i_;
  const size_t capacity_;
};

inline bool operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

inline bool operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}
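
// Example traversal (a sketch; VisitRoots on the table below is the production path):
//
//   for (auto it = irt.begin(), end = irt.end(); it != end; ++it) {
//     GcRoot<mirror::Object>* root = *it;  // note: no read barrier, roots only
//     ...
//   }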

class IndirectReferenceTable {
 public:
  enum class ResizableCapacity {
    kNo,
    kYes
  };

  // WARNING: Construction of the IndirectReferenceTable may fail.
  // error_msg must not be null. If error_msg is set by the constructor, then
  // construction has failed and the IndirectReferenceTable will be in an
  // invalid state. Use IsValid to check whether the object is in an invalid
  // state.
  IndirectReferenceTable(size_t max_count,
                         IndirectRefKind kind,
                         ResizableCapacity resizable,
                         std::string* error_msg);

  ~IndirectReferenceTable();

  /*
   * Checks whether construction of the IndirectReferenceTable succeeded.
   *
   * This object must only be used if IsValid() returns true. It is safe to
   * call IsValid from multiple threads without locking or other explicit
   * synchronization.
   */
  bool IsValid() const;

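  // Typical construction (a sketch; "kGlobalsMax" is a hypothetical size constant):
  //
  //   std::string error_msg;
  //   IndirectReferenceTable irt(kGlobalsMax, kGlobal, ResizableCapacity::kNo, &error_msg);
  //   if (!irt.IsValid()) {
  //     LOG(FATAL) << "Failed to create global reference table: " << error_msg;
  //   }
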
  // Add a new entry. "obj" must be a valid non-null object reference. This function will
  // return null if an error happened (with an appropriate error message set).
  IndirectRef Add(IRTSegmentState previous_state,
                  ObjPtr<mirror::Object> obj,
                  std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given an IndirectRef in the table, return the Object it refers to.
  //
  // This function may abort under error conditions.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<mirror::Object> SynchronizedGet(IndirectRef iref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  // Updates an existing indirect reference to point to a new object.
  void Update(IndirectRef iref, ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove an existing entry.
  //
  // If the entry is not between the current top index and the bottom index
  // specified by the cookie, we don't remove anything. This is the behavior
  // required by JNI's DeleteLocalRef function.
  //
  // Returns "false" if nothing was removed.
  bool Remove(IRTSegmentState previous_state, IndirectRef iref);

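  // DeleteLocalRef-style use (a sketch): removing a reference that lies outside the current
  // segment is a no-op that returns false.
  //
  //   if (!locals.Remove(cookie, iref)) {
  //     LOG(WARNING) << "failed to remove local reference";
  //   }
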
  void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) const
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::alloc_tracker_lock_);

  // Return the number of entries in the entire table. This includes holes, and
  // so may be larger than the actual number of "live" entries.
  size_t Capacity() const {
    return segment_state_.top_index;
  }

  // Ensure that at least free_capacity elements are available, or return false.
  bool EnsureFreeCapacity(size_t free_capacity, std::string* error_msg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // See implementation of EnsureFreeCapacity. We'll only state here how much is trivially free,
  // without recovering holes. Thus this is a conservative estimate.
  size_t FreeCapacity() const;

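  // JNI's EnsureLocalCapacity maps onto this (a sketch):
  //
  //   std::string error_msg;
  //   if (!locals.EnsureFreeCapacity(static_cast<size_t>(capacity), &error_msg)) {
  //     ...  // throw OutOfMemoryError, reporting error_msg
  //   }
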
  // Note IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
      REQUIRES_SHARED(Locks::mutator_lock_);

  IRTSegmentState GetSegmentState() const {
    return segment_state_;
  }

  void SetSegmentState(IRTSegmentState new_state);

  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
    // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
    // jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
    // is not pointer-size-safe.
    return Offset(0);
  }

  // Release pages past the end of the table that may have previously held references.
  void Trim() REQUIRES_SHARED(Locks::mutator_lock_);

  // Determine what kind of indirect reference this is. Opposite of EncodeIndirectRefKind.
  ALWAYS_INLINE static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
    return DecodeIndirectRefKind(reinterpret_cast<uintptr_t>(iref));
  }

 private:
  static constexpr size_t kSerialBits = MinimumBitsToStore(kIRTPrevCount);
  static constexpr uint32_t kShiftedSerialMask = (1u << kSerialBits) - 1;

  static constexpr size_t kKindBits = MinimumBitsToStore(
      static_cast<uint32_t>(IndirectRefKind::kLastKind));
  static constexpr uint32_t kKindMask = (1u << kKindBits) - 1;

  static constexpr uintptr_t EncodeIndex(uint32_t table_index) {
    static_assert(sizeof(IndirectRef) == sizeof(uintptr_t), "Unexpected IndirectRef size");
    DCHECK_LE(MinimumBitsToStore(table_index), BitSizeOf<uintptr_t>() - kSerialBits - kKindBits);
    return (static_cast<uintptr_t>(table_index) << kKindBits << kSerialBits);
  }
  static constexpr uint32_t DecodeIndex(uintptr_t uref) {
    return static_cast<uint32_t>((uref >> kKindBits) >> kSerialBits);
  }

  static constexpr uintptr_t EncodeIndirectRefKind(IndirectRefKind kind) {
    return static_cast<uintptr_t>(kind);
  }
  static constexpr IndirectRefKind DecodeIndirectRefKind(uintptr_t uref) {
    return static_cast<IndirectRefKind>(uref & kKindMask);
  }

  static constexpr uintptr_t EncodeSerial(uint32_t serial) {
    DCHECK_LE(MinimumBitsToStore(serial), kSerialBits);
    return serial << kKindBits;
  }
  static constexpr uint32_t DecodeSerial(uintptr_t uref) {
    return static_cast<uint32_t>(uref >> kKindBits) & kShiftedSerialMask;
  }

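  // Worked example (a sketch for a release build, where kKindBits == 2 and kSerialBits == 2):
  //
  //   EncodeIndex(5)   == 5 << 2 << 2  == 0b1010000
  //   EncodeSerial(1)  == 1 << 2       == 0b0000100
  //   uref = EncodeIndex(5) | EncodeSerial(1) | EncodeIndirectRefKind(kGlobal)  // 0b1010110
  //   DecodeIndex(uref) == 5, DecodeSerial(uref) == 1, DecodeIndirectRefKind(uref) == kGlobal
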
  constexpr uintptr_t EncodeIndirectRef(uint32_t table_index, uint32_t serial) const {
    DCHECK_LT(table_index, max_entries_);
    return EncodeIndex(table_index) | EncodeSerial(serial) | EncodeIndirectRefKind(kind_);
  }

  static void ConstexprChecks();

  // Extract the table index from an indirect reference.
  ALWAYS_INLINE static uint32_t ExtractIndex(IndirectRef iref) {
    return DecodeIndex(reinterpret_cast<uintptr_t>(iref));
  }

  IndirectRef ToIndirectRef(uint32_t table_index) const {
    DCHECK_LT(table_index, max_entries_);
    uint32_t serial = table_[table_index].GetSerial();
    return reinterpret_cast<IndirectRef>(EncodeIndirectRef(table_index, serial));
  }

  // Resize the backing table. Currently must be larger than the current size.
  bool Resize(size_t new_size, std::string* error_msg);

  void RecoverHoles(IRTSegmentState from);

  // Abort if check_jni is not enabled. Otherwise, just log as an error.
  static void AbortIfNoCheckJNI(const std::string& msg);

  // Extra debugging checks.
  bool GetChecked(IndirectRef) const REQUIRES_SHARED(Locks::mutator_lock_);
  bool CheckEntry(const char*, IndirectRef, uint32_t) const;

  // Semi-public: read/write by JNI down calls.
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // Bottom of the stack. Do not directly access the object references in this table, as they
  // are roots; use Get(), which has a read barrier.
  IrtEntry* table_;
  // Bit mask, ORed into all irefs.
  const IndirectRefKind kind_;

  // Max number of entries allowed (modulo resizing).
  size_t max_entries_;

  // Some values to retain old behavior with holes. Description of the algorithm is in the .cc
  // file.
  // TODO: Consider other data structures for compact tables, e.g., free lists.
  size_t current_num_holes_;
  IRTSegmentState last_known_previous_state_;

  // Whether the table's capacity may be resized. As there are no locks used, it is the caller's
  // responsibility to ensure thread-safety.
  ResizableCapacity resizable_;
};

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_