blob: be061be1edba28b3dee2b0b031b2dbd397f58cc8 [file] [log] [blame]
Elliott Hughes2faa5f12012-01-30 14:42:07 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Brian Carlstrom7e93b502011-08-04 14:16:22 -070016
17#include "intern_table.h"
18
Ian Rogers700a4022014-05-19 16:49:03 -070019#include <memory>
20
Mathieu Chartiere401d142015-04-22 13:56:20 -070021#include "gc_root-inl.h"
Mathieu Chartier97509952015-07-13 14:35:43 -070022#include "gc/collector/garbage_collector.h"
Ian Rogers7dfb28c2013-08-22 08:18:36 -070023#include "gc/space/image_space.h"
Mathieu Chartier14c3bf92015-07-13 14:35:43 -070024#include "gc/weak_root_state.h"
Mathieu Chartier4a26f172016-01-26 14:26:18 -080025#include "image-inl.h"
Vladimir Marko05792b92015-08-03 11:56:49 +010026#include "mirror/dex_cache-inl.h"
Ian Rogers7dfb28c2013-08-22 08:18:36 -070027#include "mirror/object_array-inl.h"
28#include "mirror/object-inl.h"
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -070029#include "mirror/string-inl.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080030#include "thread.h"
Elliott Hughes814e4032011-08-23 12:07:56 -070031#include "utf.h"
Brian Carlstrom7e93b502011-08-04 14:16:22 -070032
33namespace art {
34
// Constructs an empty intern table. Weak-root access starts in the normal
// state (reads/writes allowed); image tables are added later, lazily.
InternTable::InternTable()
    : images_added_to_intern_table_(false),
      log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}
Elliott Hughesde69d7f2011-08-18 16:49:37 -070041
Brian Carlstroma663ea52011-08-19 23:33:41 -070042size_t InternTable::Size() const {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +010043 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070044 return strong_interns_.Size() + weak_interns_.Size();
Brian Carlstroma663ea52011-08-19 23:33:41 -070045}
46
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070047size_t InternTable::StrongSize() const {
48 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070049 return strong_interns_.Size();
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070050}
51
52size_t InternTable::WeakSize() const {
53 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartiereb175f72014-10-31 11:49:27 -070054 return weak_interns_.Size();
Hiroshi Yamauchia91a4bc2014-06-13 16:44:55 -070055}
56
Elliott Hughescac6cc72011-11-03 20:31:21 -070057void InternTable::DumpForSigQuit(std::ostream& os) const {
Mathieu Chartiereb175f72014-10-31 11:49:27 -070058 os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
Elliott Hughescac6cc72011-11-03 20:31:21 -070059}
60
// Visits the strong intern roots for the GC. Depending on |flags| this either
// visits every strong root, or only the roots recorded in the new-root log
// (used for incremental root marking); it also services the log start/stop/
// clear flags. Weak interns are handled via SweepInternTableWeaks, not here.
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      // Read before/after without a read barrier so we can detect whether the
      // visitor moved the object.
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
89
Mathieu Chartierfbc31082016-01-24 11:59:56 -080090mirror::String* InternTable::LookupWeak(Thread* self, mirror::String* s) {
91 MutexLock mu(self, *Locks::intern_table_lock_);
92 return LookupWeakLocked(s);
Mathieu Chartierf7fd9702015-11-09 11:16:49 -080093}
94
Mathieu Chartierfbc31082016-01-24 11:59:56 -080095mirror::String* InternTable::LookupStrong(Thread* self, mirror::String* s) {
96 MutexLock mu(self, *Locks::intern_table_lock_);
97 return LookupStrongLocked(s);
98}
99
Vladimir Markocac5a7e2016-02-22 10:39:50 +0000100mirror::String* InternTable::LookupStrong(Thread* self,
101 uint32_t utf16_length,
102 const char* utf8_data) {
103 DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
104 Utf8String string(utf16_length,
105 utf8_data,
106 ComputeUtf16HashFromModifiedUtf8(utf8_data, utf16_length));
107 MutexLock mu(self, *Locks::intern_table_lock_);
108 return strong_interns_.Find(string);
109}
110
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800111mirror::String* InternTable::LookupWeakLocked(mirror::String* s) {
Nicolas Geoffray1bc977c2016-01-23 14:15:49 +0000112 return weak_interns_.Find(s);
113}
114
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800115mirror::String* InternTable::LookupStrongLocked(mirror::String* s) {
116 return strong_interns_.Find(s);
117}
118
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800119void InternTable::AddNewTable() {
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700120 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800121 weak_interns_.AddNewTable();
122 strong_interns_.AddNewTable();
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700123}
124
// Inserts |s| into the strong table and returns it. Records the insertion for
// transaction rollback when a transaction is active, and logs the new root
// when incremental root logging is enabled. Caller holds intern_table_lock_.
mirror::String* InternTable::InsertStrong(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}
136
// Inserts |s| into the weak table and returns it, recording the insertion for
// transaction rollback if needed. Caller holds intern_table_lock_.
mirror::String* InternTable::InsertWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}
145
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700146void InternTable::RemoveStrong(mirror::String* s) {
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700147 strong_interns_.Remove(s);
Hiroshi Yamauchi1bd48722014-05-23 19:58:15 -0700148}
149
// Removes |s| from the weak table, recording the removal for transaction
// rollback if a transaction is active. Caller holds intern_table_lock_.
void InternTable::RemoveWeak(mirror::String* s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}
157
// Insert/remove methods used to undo changes made during an aborted transaction.
// They bypass the transaction-recording paths above (the transaction is no
// longer active when the rollback runs), which the DCHECKs enforce.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}
175
// Adds the interned strings of each image space to this table. Newer images
// carry a serialized intern-table section which is adopted wholesale; older
// images without the section are handled by walking every dex cache's
// resolved strings and strong-interning each one individually.
void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  for (gc::space::ImageSpace* image_space : image_spaces) {
    const ImageHeader* const header = &image_space->GetImageHeader();
    // Check if we have the interned strings section.
    const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
    if (section.Size() > 0) {
      AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
    } else {
      // TODO: Delete this logic?
      mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
      mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
      for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
        mirror::DexCache* dex_cache = dex_caches->Get(i);
        const size_t num_strings = dex_cache->NumStrings();
        for (size_t j = 0; j < num_strings; ++j) {
          mirror::String* image_string = dex_cache->GetResolvedString(j);
          if (image_string != nullptr) {
            mirror::String* found = LookupStrongLocked(image_string);
            if (found == nullptr) {
              InsertStrong(image_string);
            } else {
              // A string with the same value must be the identical object.
              DCHECK_EQ(found, image_string);
            }
          }
        }
      }
    }
  }
  // From now on Insert() skips the per-string image lookup fallback.
  images_added_to_intern_table_ = true;
}
207
// Searches the boot image spaces for a resolved string equal to |s|. For each
// dex cache in each image, binary-searches the dex file's string ids and, on a
// hit, returns the already-resolved mirror::String if present. Returns null if
// no image is loaded or no match is found. Only used before the image intern
// tables have been merged in (see DCHECK).
mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
  DCHECK(!images_added_to_intern_table_);
  const std::vector<gc::space::ImageSpace*>& image_spaces =
      Runtime::Current()->GetHeap()->GetBootImageSpaces();
  if (image_spaces.empty()) {
    return nullptr;  // No image present.
  }
  const std::string utf8 = s->ToModifiedUtf8();
  for (gc::space::ImageSpace* image_space : image_spaces) {
    mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
    mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
    for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
      mirror::DexCache* dex_cache = dex_caches->Get(i);
      const DexFile* dex_file = dex_cache->GetDexFile();
      // Binary search the dex file for the string index.
      const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
      if (string_id != nullptr) {
        uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
        // GetResolvedString() contains a RB.
        mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
        if (image_string != nullptr) {
          return image_string;
        }
      }
    }
  }
  return nullptr;
}
236
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700237void InternTable::BroadcastForNewInterns() {
238 CHECK(kUseReadBarrier);
239 Thread* self = Thread::Current();
240 MutexLock mu(self, *Locks::intern_table_lock_);
Mathieu Chartier14c3bf92015-07-13 14:35:43 -0700241 weak_intern_condition_.Broadcast(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700242}
243
// Blocks until weak roots become readable again. Caller enters holding
// intern_table_lock_; we drop it, move to a suspended state (so the GC can
// proceed), wait on the condition variable, and re-acquire the lock before
// returning. The exact unlock/suspend/lock ordering here matters for GC
// deadlock avoidance.
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}
255
// Core interning routine. Returns the canonical string for |s|, inserting it
// strongly or weakly if no match exists. If weak roots are currently
// inaccessible (GC is sweeping system weaks), waits until they are readable
// again unless |holding_locks| forbids blocking. |s| may be null.
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      // A caller holding other locks must not need to wait, so weak-ref
      // access has to already be enabled.
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    mirror::String* strong = LookupStrongLocked(s);
    if (strong != nullptr) {
      return strong;
    }
    // If weak roots are readable, we can proceed to the weak-table lookup.
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    StackHandleScope<1> hs(self);
    // Handle wrapper keeps |s| up to date if a moving GC relocates it while
    // we are suspended inside WaitUntilAccessible.
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  mirror::String* weak = LookupWeakLocked(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // Check the image for a match.
  if (!images_added_to_intern_table_) {
    mirror::String* const image_string = LookupStringFromImage(s);
    if (image_string != nullptr) {
      return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
    }
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}
316
Mathieu Chartiered0fc1d2014-03-21 14:09:35 -0700317mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
318 DCHECK(utf8_data != nullptr);
Vladimir Markod2bdb9b2016-07-08 17:23:22 +0100319 Thread* self = Thread::Current();
320 // Try to avoid allocation.
321 mirror::String* s = LookupStrong(self, utf16_length, utf8_data);
322 if (s != nullptr) {
323 return s;
324 }
Ian Rogers7dfb28c2013-08-22 08:18:36 -0700325 return InternStrong(mirror::String::AllocFromModifiedUtf8(
Vladimir Markod2bdb9b2016-07-08 17:23:22 +0100326 self, utf16_length, utf8_data));
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700327}
328
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800329mirror::String* InternTable::InternStrong(const char* utf8_data) {
Mathieu Chartiered0fc1d2014-03-21 14:09:35 -0700330 DCHECK(utf8_data != nullptr);
331 return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
Brian Carlstromc74255f2011-09-11 22:47:39 -0700332}
333
Mathieu Chartier90ef3db2015-08-04 15:19:41 -0700334mirror::String* InternTable::InternStrongImageString(mirror::String* s) {
Mathieu Chartier14c3bf92015-07-13 14:35:43 -0700335 // May be holding the heap bitmap lock.
336 return Insert(s, true, true);
337}
338
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800339mirror::String* InternTable::InternStrong(mirror::String* s) {
Mathieu Chartier14c3bf92015-07-13 14:35:43 -0700340 return Insert(s, true, false);
Brian Carlstromc74255f2011-09-11 22:47:39 -0700341}
342
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800343mirror::String* InternTable::InternWeak(mirror::String* s) {
Mathieu Chartier14c3bf92015-07-13 14:35:43 -0700344 return Insert(s, false, false);
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700345}
346
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800347bool InternTable::ContainsWeak(mirror::String* s) {
Mathieu Chartierfbc31082016-01-24 11:59:56 -0800348 return LookupWeak(Thread::Current(), s) == s;
Elliott Hughescf4c6c42011-09-01 15:16:42 -0700349}
350
Mathieu Chartier97509952015-07-13 14:35:43 -0700351void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100352 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -0700353 weak_interns_.SweepWeaks(visitor);
Brian Carlstroma663ea52011-08-19 23:33:41 -0700354}
355
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800356size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700357 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800358 return AddTableFromMemoryLocked(ptr);
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700359}
360
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800361size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
362 return strong_interns_.AddTableFromMemory(ptr);
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700363}
364
365size_t InternTable::WriteToMemory(uint8_t* ptr) {
366 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800367 return strong_interns_.WriteToMemory(ptr);
Mathieu Chartierd39645e2015-06-09 17:50:29 -0700368}
369
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800370std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700371 if (kIsDebugBuild) {
372 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
373 }
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800374 return static_cast<size_t>(root.Read()->GetHashCode());
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700375}
376
377bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800378 const GcRoot<mirror::String>& b) const {
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700379 if (kIsDebugBuild) {
380 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
381 }
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800382 return a.Read()->Equals(b.Read());
Mathieu Chartiercdfd39f2014-08-29 18:16:58 -0700383}
384
// Equality functor comparing an interned string against a raw Modified-UTF-8
// key, avoiding a mirror::String allocation. Handles both compressed (8-bit
// ASCII) and uncompressed (16-bit) string layouts.
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const Utf8String& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  mirror::String* a_string = a.Read();
  uint32_t a_length = static_cast<uint32_t>(a_string->GetLength());
  // Cheap length check first.
  if (a_length != b.GetUtf16Length()) {
    return false;
  }
  if (a_string->IsCompressed()) {
    size_t b_byte_count = strlen(b.GetUtf8Data());
    size_t b_utf8_length = CountModifiedUtf8Chars(b.GetUtf8Data(), b_byte_count);
    // Modified UTF-8 single byte character range is 0x01 .. 0x7f
    // The string compression occurs on regular ASCII with same exact range,
    // not on extended ASCII which up to 0xff
    const bool is_b_regular_ascii = (b_byte_count == b_utf8_length);
    if (is_b_regular_ascii) {
      // Pure-ASCII key: bytes compare directly against the compressed chars.
      return memcmp(b.GetUtf8Data(),
                    a_string->GetValueCompressed(), a_length * sizeof(uint8_t)) == 0;
    } else {
      // Key has multi-byte chars; a compressed string can never match.
      return false;
    }
  } else {
    const uint16_t* a_value = a_string->GetValue();
    return CompareModifiedUtf8ToUtf16AsCodePointValues(b.GetUtf8Data(), a_value, a_length) == 0;
  }
}
413
// Deserializes an UnorderedSet from |ptr| (zero-copy) and prepends it to
// tables_, so runtime inserts (which always go to the back) never dirty the
// mapped image pages. Returns the number of bytes read regardless of whether
// the set was kept.
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
  size_t read_count = 0;
  UnorderedSet set(ptr, /*make copy*/false, &read_count);
  if (set.Empty()) {
    // Avoid inserting empty sets.
    return read_count;
  }
  // TODO: Disable this for app images if app images have intern tables.
  static constexpr bool kCheckDuplicates = true;
  if (kCheckDuplicates) {
    // Sanity check: the incoming table must not duplicate existing interns.
    for (GcRoot<mirror::String>& string : set) {
      CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
    }
  }
  // Insert at the front since we add new interns into the back.
  tables_.insert(tables_.begin(), std::move(set));
  return read_count;
}
432
// Serializes the table to |ptr|, returning bytes written (0 if empty). When
// multiple backing sets exist they are first merged into one combined set so
// the output is a single flat table.
size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
  if (tables_.empty()) {
    return 0;
  }
  UnorderedSet* table_to_write;
  UnorderedSet combined;
  if (tables_.size() > 1) {
    table_to_write = &combined;
    for (UnorderedSet& table : tables_) {
      for (GcRoot<mirror::String>& string : table) {
        combined.Insert(string);
      }
    }
  } else {
    // Single set: write it out directly without copying.
    table_to_write = &tables_.back();
  }
  return table_to_write->WriteToMemory(ptr);
}
451
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700452void InternTable::Table::Remove(mirror::String* s) {
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800453 for (UnorderedSet& table : tables_) {
454 auto it = table.Find(GcRoot<mirror::String>(s));
455 if (it != table.end()) {
456 table.Erase(it);
457 return;
458 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700459 }
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800460 LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700461}
462
463mirror::String* InternTable::Table::Find(mirror::String* s) {
464 Locks::intern_table_lock_->AssertHeld(Thread::Current());
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800465 for (UnorderedSet& table : tables_) {
466 auto it = table.Find(GcRoot<mirror::String>(s));
467 if (it != table.end()) {
468 return it->Read();
469 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700470 }
471 return nullptr;
472}
473
Vladimir Markocac5a7e2016-02-22 10:39:50 +0000474mirror::String* InternTable::Table::Find(const Utf8String& string) {
475 Locks::intern_table_lock_->AssertHeld(Thread::Current());
476 for (UnorderedSet& table : tables_) {
477 auto it = table.Find(string);
478 if (it != table.end()) {
479 return it->Read();
480 }
481 }
482 return nullptr;
483}
484
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800485void InternTable::Table::AddNewTable() {
486 tables_.push_back(UnorderedSet());
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700487}
488
// Inserts |s| into the most recent backing set.
void InternTable::Table::Insert(mirror::String* s) {
  // Always insert the last table, the image tables are before and we avoid inserting into these
  // to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().Insert(GcRoot<mirror::String>(s));
}
495
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700496void InternTable::Table::VisitRoots(RootVisitor* visitor) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -0700497 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
498 visitor, RootInfo(kRootInternedString));
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800499 for (UnorderedSet& table : tables_) {
500 for (auto& intern : table) {
501 buffered_visitor.VisitRoot(intern);
502 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700503 }
504}
505
Mathieu Chartier97509952015-07-13 14:35:43 -0700506void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800507 for (UnorderedSet& table : tables_) {
508 SweepWeaks(&table, visitor);
509 }
Mathieu Chartiereb175f72014-10-31 11:49:27 -0700510}
511
// Sweeps one backing set: erases entries whose object is unmarked, updates
// entries whose object was moved by the GC.
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      // Unreachable: drop the weak intern. Erase returns the next iterator.
      it = set->Erase(it);
    } else {
      // Possibly moved: store the (possibly identical) new address.
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}
525
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800526size_t InternTable::Table::Size() const {
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800527 return std::accumulate(tables_.begin(),
528 tables_.end(),
Mathieu Chartier205b7622016-01-06 15:47:09 -0800529 0U,
Mathieu Chartierea0831f2015-12-29 13:17:37 -0800530 [](size_t sum, const UnorderedSet& set) {
531 return sum + set.Size();
532 });
Mathieu Chartierc2e20622014-11-03 11:41:47 -0800533}
534
Mathieu Chartier14c3bf92015-07-13 14:35:43 -0700535void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
536 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
537 ChangeWeakRootStateLocked(new_state);
538}
539
// Updates the weak-root access state (non-read-barrier GCs only; the CC
// collector uses per-thread weak-ref access flags instead). Waking waiters is
// required whenever the state leaves kWeakRootStateNoReadsOrWrites.
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}
547
// Creates the initial (mutable) backing set, configured with the runtime's
// hash-table load-factor bounds.
InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  // Initial table.
  tables_.push_back(UnorderedSet());
  tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                               runtime->GetHashTableMaxLoadFactor());
}
555
Brian Carlstrom7e93b502011-08-04 14:16:22 -0700556} // namespace art