/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

#include <cstdlib>

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;

// Maximum table size we allow.
static constexpr size_t kMaxTableSizeInBytes = 128 * MB;

const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
  switch (kind) {
    case kHandleScopeOrInvalid:
      return "HandleScopeOrInvalid";
    case kLocal:
      return "Local";
    case kGlobal:
      return "Global";
    case kWeakGlobal:
      return "WeakGlobal";
  }
  return "IndirectRefKind Error";
}

void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << msg;
  } else {
    LOG(ERROR) << msg;
  }
}

IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                               IndirectRefKind desired_kind,
                                               ResizableCapacity resizable,
                                               std::string* error_msg)
    : segment_state_(kIRTFirstSegment),
      kind_(desired_kind),
      max_entries_(max_count),
      current_num_holes_(0),
      resizable_(resizable) {
  CHECK(error_msg != nullptr);
  CHECK_NE(desired_kind, kHandleScopeOrInvalid);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));

  const size_t table_bytes = max_count * sizeof(IrtEntry);
  table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        error_msg);
  if (!table_mem_map_.IsValid() && error_msg->empty()) {
    *error_msg = "Unable to map memory for indirect ref table";
  }

  if (table_mem_map_.IsValid()) {
    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  } else {
    table_ = nullptr;
  }
  segment_state_ = kIRTFirstSegment;
  last_known_previous_state_ = kIRTFirstSegment;
}

IndirectReferenceTable::~IndirectReferenceTable() {
}

void IndirectReferenceTable::ConstexprChecks() {
  // A home for some assertions. They can't go in the header, as C++ requires the class
  // to be complete before they can be evaluated.

  // Check kind.
  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
                "Kind encoding error");

  // Check serial.
  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");

  // Table index.
  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
}

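// For illustration of what the assertions above pin down: an IndirectRef packs a serial
// number, a table index, and the IndirectRefKind into a single pointer-sized value, with
// the kind kept under kKindMask so that kHandleScopeOrInvalid (0) never collides with an
// encoded table reference. A sketch of the round trip, assuming the bit widths implied by
// the checks (two kind bits, at least two serial bits):
//
//   IndirectRef ref = ToIndirectRef(index);     // encode serial, index, and kind_
//   DCHECK_EQ(ExtractIndex(ref), index);        // the index survives the round trip
//   DCHECK_EQ(GetIndirectRefKind(ref), kind_);  // and so does the kind
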
bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.IsValid();
}

// Holes:
//
// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
// similar. Instead, we scan for holes, with the expectation that we will find them quickly, as
// they are usually near the end of the table (see the header, TODO: verify this assumption). To
// avoid scans when there are no holes, the number of known holes is tracked.
//
// A previous implementation stored the top index and the number of holes as the segment state.
// That constrained the maximum number of references to what fits in 16 bits. We want to relax
// this, as it is easy to require more references (e.g., to list all classes in large
// applications). Thus, the implicitly stack-stored state, the IRTSegmentState, is only the top
// index.
//
// Hole count is therefore a local property of the current segment, and needs to be recovered
// when (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable),
// we cannot do work when the segment changes. Thus, Add and Remove need to ensure that the
// current hole count is correct.
//
// To be able to detect segment changes, we require an additional local field that describes the
// known segment. This is last_known_previous_state_. The requirement becomes clear from the
// following (partly non-trivial) cases that have to be supported:
//
// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
//    reference
// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
//    reference
//
// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
// segment changes above. The condition for an unchanged segment is simply that the last known
// state is greater than or equal to the current previous state, and smaller than the current
// state (top index). The condition is conservative, as it adds O(1) overhead to operations on an
// empty segment.

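// Worked example for case 4 above (the indices are illustrative): suppose the base segment
// holds two references (top_index == 2). A segment is pushed (previous state 2) and three
// references are added (top_index == 5); the reference at index 3 is then removed, leaving a
// hole (current_num_holes_ == 1, last_known_previous_state_ == 2). When the segment is popped,
// top_index returns to 2, but current_num_holes_ is now stale. The next Add on the base segment
// runs with previous state 0; since last_known_previous_state_ (2) >= top_index (2),
// RecoverHoles() below rescans [0, 2), finds no null entries, and resets current_num_holes_ to
// 0 before the add proceeds.
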
static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
  size_t count = 0;
  for (size_t index = from; index != to; ++index) {
    if (table[index].GetReference()->IsNull()) {
      count++;
    }
  }
  return count;
}

void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
      last_known_previous_state_.top_index < prev_state.top_index) {
    const size_t top_index = segment_state_.top_index;
    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);

    if (kDebugIRT) {
      LOG(INFO) << "+++ Recovered holes: "
                << " Current prev=" << prev_state.top_index
                << " Current top_index=" << top_index
                << " Old num_holes=" << current_num_holes_
                << " New num_holes=" << count;
    }

    current_num_holes_ = count;
    last_known_previous_state_ = prev_state;
  } else if (kDebugIRT) {
    LOG(INFO) << "No need to recover holes";
  }
}

ALWAYS_INLINE
static inline void CheckHoleCount(IrtEntry* table,
                                  size_t exp_num_holes,
                                  IRTSegmentState prev_state,
                                  IRTSegmentState cur_state) {
  if (kIsDebugBuild) {
    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
                                   << " topIndex=" << cur_state.top_index;
  }
}

bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  CHECK_GT(new_size, max_entries_);

  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
  if (new_size > kMaxEntries) {
    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
    return false;
  }
  // Note: the above check also ensures that there is no overflow below.

  const size_t table_bytes = new_size * sizeof(IrtEntry);
  MemMap new_map = MemMap::MapAnonymous("indirect ref table",
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        error_msg);
  if (!new_map.IsValid()) {
    return false;
  }

  memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
  table_mem_map_ = std::move(new_map);
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  max_entries_ = new_size;

  return true;
}

IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
                                        ObjPtr<mirror::Object> obj,
                                        std::string* error_msg) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  size_t top_index = segment_state_.top_index;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);

  if (top_index == max_entries_) {
    if (resizable_ == ResizableCapacity::kNo) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")"
          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      *error_msg = oss.str();
      return nullptr;
    }

    // Try to double the space.
    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: exceeds size_t";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  // We know there's enough room in the table. Now we just need to find
  // the right spot. If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  size_t index;
  if (current_num_holes_ > 0) {
    DCHECK_GT(top_index, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* p_scan = &table_[top_index - 1];
    DCHECK(!p_scan->GetReference()->IsNull());
    --p_scan;
    while (!p_scan->GetReference()->IsNull()) {
      DCHECK_GE(p_scan, table_ + previous_state.top_index);
      --p_scan;
    }
    index = p_scan - table_;
    current_num_holes_--;
  } else {
    // Add to the end.
    index = top_index++;
    segment_state_.top_index = top_index;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if (kDebugIRT) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
              << " holes=" << current_num_holes_;
  }

  DCHECK(result != nullptr);
  return result;
}

void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      UNREACHABLE();
    }
  }
}

// Removes an object. We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything. This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  const uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  DCHECK(table_ != nullptr);

  if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
    auto* self = Thread::Current();
    if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->IsCheckJniEnabled()) {
        ScopedObjectAccess soa(self);
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG_STREAM(WARNING));
        }
      }
      return true;
    }
  }
  const uint32_t idx = ExtractIndex(iref);
  if (idx < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }
  if (idx >= top_index) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
    return false;
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  if (idx == top_index - 1) {
    // Top-most entry. Scan up and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    if (current_num_holes_ != 0) {
      uint32_t collapse_top_index = top_index;
      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
        if (kDebugIRT) {
          ScopedObjectAccess soa(Thread::Current());
          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
                    << " (previous_state=" << bottom_index << ") val="
                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
          break;
        }
        if (kDebugIRT) {
          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
        }
        current_num_holes_--;
      }
      segment_state_.top_index = collapse_top_index;

      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    } else {
      segment_state_.top_index = top_index - 1;
      if (kDebugIRT) {
        LOG(INFO) << "+++ ate last entry " << top_index - 1;
      }
    }
  } else {
    // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    current_num_holes_++;
    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    if (kDebugIRT) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
    }
  }

  return true;
}

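// Usage sketch for the Add/Remove cookie discipline (a hypothetical caller; GetSegmentState()
// is assumed here as the header-side accessor for the current segment state). This mirrors how
// JNI local reference frames drive the table:
//
//   const IRTSegmentState cookie = irt->GetSegmentState();  // "push" a frame
//   std::string error_msg;
//   IndirectRef ref = irt->Add(cookie, obj, &error_msg);    // ref lives in the new frame
//   irt->Remove(cookie, ref);                               // explicit single removal
//   irt->SetSegmentState(cookie);                           // "pop" the frame
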
void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const size_t top_index = Capacity();
  // Release the whole pages past the last used entry back to the kernel; as anonymous memory,
  // they are zero-filled on demand if the table grows into them again.
  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = table_mem_map_.End();
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      DCHECK(!ref->IsNull());
    }
  }
}

void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    // Read without a read barrier first to filter out null entries; re-read live entries
    // with the read barrier before recording them.
    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
  if (kDebugIRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  size_t top_index = segment_state_.top_index;
  if (top_index < max_entries_ && top_index + free_capacity <= max_entries_) {
    return true;
  }

  // We only make a simple best-effort attempt here, ensuring the asked-for capacity at the end.
  if (resizable_ == ResizableCapacity::kNo) {
    *error_msg = "Table is not resizable";
    return false;
  }

  // Try to increase the table size.

  // Would this overflow?
  if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
    *error_msg = "Cannot resize table, overflow.";
    return false;
  }

  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

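// Example of the capacity arithmetic (illustrative numbers): with max_entries_ == 16 and
// top_index == 10, FreeCapacity() below returns 6, and EnsureFreeCapacity(10, ...) fails the
// fast path above (10 + 10 > 16), so a resizable table is grown via Resize(20) to guarantee
// the requested room.
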
size_t IndirectReferenceTable::FreeCapacity() const {
  return max_entries_ - segment_state_.top_index;
}

}  // namespace art