/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intern_table.h"

#include <memory>

#include "gc_root-inl.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
#include "image-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "utf.h"

namespace art {

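// The intern table holds two string sets: strong_interns_, whose entries are visited as GC
// roots and keep their strings alive, and weak_interns_, whose entries are swept by
// SweepInternTableWeaks() once the strings are no longer reachable. Both sets are guarded by
// Locks::intern_table_lock_.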
InternTable::InternTable()
    : log_new_roots_(false),
      weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
      weak_root_state_(gc::kWeakRootStateNormal) {
}

size_t InternTable::Size() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size() + weak_interns_.Size();
}

size_t InternTable::StrongSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.Size();
}

size_t InternTable::WeakSize() const {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return weak_interns_.Size();
}

void InternTable::DumpForSigQuit(std::ostream& os) const {
  os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}

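// Visits strong interns as GC roots. kVisitRootFlagAllRoots visits every strong intern;
// kVisitRootFlagNewRoots visits only the roots logged since logging was enabled, re-inserting
// any root the GC moved under its new location. Weak interns are deliberately not visited
// (see the note at the end of the function).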
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      ObjPtr<mirror::String> old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      ObjPtr<mirror::String> new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}

ObjPtr<mirror::String> InternTable::LookupWeak(Thread* self, ObjPtr<mirror::String> s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupWeakLocked(s);
}

ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self, ObjPtr<mirror::String> s) {
  MutexLock mu(self, *Locks::intern_table_lock_);
  return LookupStrongLocked(s);
}

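// Looks up a strong intern directly from modified UTF-8 data. Hashing the raw bytes up front
// avoids allocating a temporary mirror::String just to probe the table.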
ObjPtr<mirror::String> InternTable::LookupStrong(Thread* self,
                                                 uint32_t utf16_length,
                                                 const char* utf8_data) {
  DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
  Utf8String string(utf16_length,
                    utf8_data,
                    ComputeUtf16HashFromModifiedUtf8(utf8_data, utf16_length));
  MutexLock mu(self, *Locks::intern_table_lock_);
  return strong_interns_.Find(string);
}

ObjPtr<mirror::String> InternTable::LookupWeakLocked(ObjPtr<mirror::String> s) {
  return weak_interns_.Find(s);
}

ObjPtr<mirror::String> InternTable::LookupStrongLocked(ObjPtr<mirror::String> s) {
  return strong_interns_.Find(s);
}

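// Pushes a fresh backing set onto both tables. Later insertions go only into the newest set
// (see Table::Insert), which keeps the older, typically shared, tables free of dirty pages.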
void InternTable::AddNewTable() {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.AddNewTable();
  strong_interns_.AddNewTable();
}

ObjPtr<mirror::String> InternTable::InsertStrong(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordStrongStringInsertion(s);
  }
  if (log_new_roots_) {
    new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
  }
  strong_interns_.Insert(s);
  return s;
}

ObjPtr<mirror::String> InternTable::InsertWeak(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringInsertion(s);
  }
  weak_interns_.Insert(s);
  return s;
}

void InternTable::RemoveStrong(ObjPtr<mirror::String> s) {
  strong_interns_.Remove(s);
}

void InternTable::RemoveWeak(ObjPtr<mirror::String> s) {
  Runtime* runtime = Runtime::Current();
  if (runtime->IsActiveTransaction()) {
    runtime->RecordWeakStringRemoval(s);
  }
  weak_interns_.Remove(s);
}

// Insert/remove methods used to undo changes made during an aborted transaction.
ObjPtr<mirror::String> InternTable::InsertStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}

ObjPtr<mirror::String> InternTable::InsertWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}

void InternTable::RemoveStrongFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}

void InternTable::RemoveWeakFromTransaction(ObjPtr<mirror::String> s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}

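// Adds each image's pre-interned strings to the strong table by adopting the image's
// interned-strings section in place instead of re-interning string by string.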
void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  for (gc::space::ImageSpace* image_space : image_spaces) {
    const ImageHeader* const header = &image_space->GetImageHeader();
    // Check if we have the interned strings section.
    const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
    if (section.Size() > 0) {
      AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
    }
  }
}

void InternTable::BroadcastForNewInterns() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  weak_intern_condition_.Broadcast(self);
}

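// Blocks until weak root access is re-enabled. The intern table lock is released while waiting
// so the GC can make progress, and reacquired before returning.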
void InternTable::WaitUntilAccessible(Thread* self) {
  Locks::intern_table_lock_->ExclusiveUnlock(self);
  {
    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
    MutexLock mu(self, *Locks::intern_table_lock_);
    while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
           (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
      weak_intern_condition_.Wait(self);
    }
  }
  Locks::intern_table_lock_->ExclusiveLock(self);
}

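// Common insertion path for the Intern*/InternWeak entry points:
//   1. Return the existing strong intern if there is one.
//   2. If weak roots are currently inaccessible (the GC is sweeping system weaks), wait and
//      retry; callers that already hold locks must never need to wait (CHECKed below).
//   3. Return the existing weak intern, promoting it to the strong table if is_strong.
//   4. Otherwise insert s into the requested table.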
ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
                                           bool is_strong,
                                           bool holding_locks) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* const self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  if (kDebugLocking && !holding_locks) {
    Locks::mutator_lock_->AssertSharedHeld(self);
    CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
  }
  while (true) {
    if (holding_locks) {
      if (!kUseReadBarrier) {
        CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
      } else {
        CHECK(self->GetWeakRefAccessEnabled());
      }
    }
    // Check the strong table for a match.
    ObjPtr<mirror::String> strong = LookupStrongLocked(s);
    if (strong != nullptr) {
      return strong;
    }
    if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) ||
        (kUseReadBarrier && self->GetWeakRefAccessEnabled())) {
      break;
    }
    // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
    // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
    // cleared.
    CHECK(!holding_locks);
    StackHandleScope<1> hs(self);
    auto h = hs.NewHandleWrapper(&s);
    WaitUntilAccessible(self);
  }
  if (!kUseReadBarrier) {
    CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
  } else {
    CHECK(self->GetWeakRefAccessEnabled());
  }
  // There is no match in the strong table, check the weak table.
  ObjPtr<mirror::String> weak = LookupWeakLocked(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}

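// Interns modified UTF-8 data as a strong string, allocating a mirror::String only when the
// lookup misses. A typical call site looks roughly like this (a sketch, assuming the caller
// holds the mutator lock and the runtime is started):
//
//   ObjPtr<mirror::String> s =
//       Runtime::Current()->GetInternTable()->InternStrong("java.lang.Object");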
ObjPtr<mirror::String> InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  Thread* self = Thread::Current();
  // Try to avoid allocation.
  ObjPtr<mirror::String> s = LookupStrong(self, utf16_length, utf8_data);
  if (s != nullptr) {
    return s;
  }
  return InternStrong(mirror::String::AllocFromModifiedUtf8(
      self, utf16_length, utf8_data));
}

ObjPtr<mirror::String> InternTable::InternStrong(const char* utf8_data) {
  DCHECK(utf8_data != nullptr);
  return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}

ObjPtr<mirror::String> InternTable::InternStrongImageString(ObjPtr<mirror::String> s) {
  // May be holding the heap bitmap lock.
  return Insert(s, true, true);
}

ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
  return Insert(s, true, false);
}

ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
  return Insert(s, false, false);
}

bool InternTable::ContainsWeak(ObjPtr<mirror::String> s) {
  return LookupWeak(Thread::Current(), s) == s;
}

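// Called by the GC while sweeping system weaks: drops weak interns whose strings are dead and
// updates entries for strings the GC has moved.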
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  weak_interns_.SweepWeaks(visitor);
}

size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return AddTableFromMemoryLocked(ptr);
}

size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
  return strong_interns_.AddTableFromMemory(ptr);
}

size_t InternTable::WriteToMemory(uint8_t* ptr) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  return strong_interns_.WriteToMemory(ptr);
}

std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  // An additional cast to prevent undesired sign extension.
  return static_cast<size_t>(
      static_cast<uint32_t>(root.Read<kWithoutReadBarrier>()->GetHashCode()));
}

bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const GcRoot<mirror::String>& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  return a.Read<kWithoutReadBarrier>()->Equals(b.Read<kWithoutReadBarrier>());
}

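// Compares an interned string against raw modified UTF-8 data. Compressed (8-bit) strings can
// only equal plain-ASCII input and are compared bytewise; uncompressed strings are compared
// code point by code point.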
bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
                                               const Utf8String& b) const {
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
  ObjPtr<mirror::String> a_string = a.Read<kWithoutReadBarrier>();
  uint32_t a_length = static_cast<uint32_t>(a_string->GetLength());
  if (a_length != b.GetUtf16Length()) {
    return false;
  }
  if (a_string->IsCompressed()) {
    size_t b_byte_count = strlen(b.GetUtf8Data());
    size_t b_utf8_length = CountModifiedUtf8Chars(b.GetUtf8Data(), b_byte_count);
    // The modified UTF-8 single-byte character range is 0x01 .. 0x7f. String compression
    // applies to regular ASCII with that same range, not to extended ASCII, which goes up
    // to 0xff.
    const bool is_b_regular_ascii = (b_byte_count == b_utf8_length);
    if (is_b_regular_ascii) {
      return memcmp(b.GetUtf8Data(),
                    a_string->GetValueCompressed(), a_length * sizeof(uint8_t)) == 0;
    } else {
      return false;
    }
  } else {
    const uint16_t* a_value = a_string->GetValue();
    return CompareModifiedUtf8ToUtf16AsCodePointValues(b.GetUtf8Data(), a_value, a_length) == 0;
  }
}

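// Deserializes an UnorderedSet that aliases (does not copy) the given memory, typically an
// image's interned-strings section, and inserts it at the front of tables_. Returns the number
// of bytes read.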
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
  size_t read_count = 0;
  UnorderedSet set(ptr, /*make copy*/false, &read_count);
  if (set.Empty()) {
    // Avoid inserting empty sets.
    return read_count;
  }
  // TODO: Disable this for app images if app images have intern tables.
  static constexpr bool kCheckDuplicates = true;
  if (kCheckDuplicates) {
    for (GcRoot<mirror::String>& string : set) {
      CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
    }
  }
  // Insert at the front since we add new interns into the back.
  tables_.insert(tables_.begin(), std::move(set));
  return read_count;
}

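// Serializes the interns to ptr. If several backing sets exist, they are first merged into one
// combined set so the serialized form is always a single table.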
size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
  if (tables_.empty()) {
    return 0;
  }
  UnorderedSet* table_to_write;
  UnorderedSet combined;
  if (tables_.size() > 1) {
    table_to_write = &combined;
    for (UnorderedSet& table : tables_) {
      for (GcRoot<mirror::String>& string : table) {
        combined.Insert(string);
      }
    }
  } else {
    table_to_write = &tables_.back();
  }
  return table_to_write->WriteToMemory(ptr);
}

void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      table.Erase(it);
      return;
    }
  }
  LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8();
}

ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(GcRoot<mirror::String>(s));
    if (it != table.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
  Locks::intern_table_lock_->AssertHeld(Thread::Current());
  for (UnorderedSet& table : tables_) {
    auto it = table.Find(string);
    if (it != table.end()) {
      return it->Read();
    }
  }
  return nullptr;
}

void InternTable::Table::AddNewTable() {
  tables_.push_back(UnorderedSet());
}

void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
  // Always insert into the last table: the image tables come before it, and we avoid
  // inserting into those to prevent dirty pages.
  DCHECK(!tables_.empty());
  tables_.back().Insert(GcRoot<mirror::String>(s));
}

void InternTable::Table::VisitRoots(RootVisitor* visitor) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootInternedString));
  for (UnorderedSet& table : tables_) {
    for (auto& intern : table) {
      buffered_visitor.VisitRoot(intern);
    }
  }
}

void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) {
  for (UnorderedSet& table : tables_) {
    SweepWeaks(&table, visitor);
  }
}

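// Sweeps one backing set: erases entries whose strings the GC reports as dead and rewrites
// entries whose strings were moved.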
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = visitor->IsMarked(object);
    if (new_object == nullptr) {
      it = set->Erase(it);
    } else {
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}

size_t InternTable::Table::Size() const {
  return std::accumulate(tables_.begin(),
                         tables_.end(),
                         0U,
                         [](size_t sum, const UnorderedSet& set) {
                           return sum + set.Size();
                         });
}

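// Transitions the weak root state and wakes threads blocked in WaitUntilAccessible() when
// access is re-enabled. Only used when read barriers are off (CHECKed in the locked variant);
// with read barriers, the per-thread weak-ref-access flag serves this purpose instead.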
void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  ChangeWeakRootStateLocked(new_state);
}

void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
  CHECK(!kUseReadBarrier);
  weak_root_state_ = new_state;
  if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
    weak_intern_condition_.Broadcast(Thread::Current());
  }
}

InternTable::Table::Table() {
  Runtime* const runtime = Runtime::Current();
  // Initial table.
  tables_.push_back(UnorderedSet());
  tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
                               runtime->GetHashTableMaxLoadFactor());
}

}  // namespace art