/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "lambda/box_table.h"

#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"

namespace art {
namespace lambda {

BoxTable::BoxTable()
  : allow_new_weaks_(true),
    new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}

mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
  Thread* self = Thread::Current();

  {
    // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
    /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Attempt to look up this object; it's possible it was already boxed previously.
    // If this is the case we *must* return the same object as before to maintain
    // referential equality.
    //
    // In managed code:
    //   Functional f = () -> 5;  // vF = create-lambda
    //   Object a = f;            // vA = box-lambda vF
    //   Object b = f;            // vB = box-lambda vF
    //   assert(a == b)
    ValueType value = FindBoxedLambda(closure);
    if (!value.IsNull()) {
      return value.Read();
    }

    // Otherwise we need to box the closure ourselves and insert it into the hash map.
  }

  // Release the lambda table lock here, so that thread suspension is allowed.

  // Convert the ArtMethod into a java.lang.reflect.Method which will serve
  // as the temporary 'boxed' version of the lambda. This is good enough
  // to check all the basic object identities that a boxed lambda must retain.

  // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
  mirror::Method* method_as_object =
      mirror::Method::CreateFromArtMethod(self, closure);
  // There are no thread suspension points after this, so we don't need to put it into a handle.

  if (UNLIKELY(method_as_object == nullptr)) {
    // Most likely an OOM has occurred.
    CHECK(self->IsExceptionPending());
    return nullptr;
  }

  // The method has been successfully boxed into an object; now insert it into the hash map.
  {
    MutexLock mu(self, *Locks::lambda_table_lock_);
    BlockUntilWeaksAllowed();

    // Look up the object again; it's possible another thread boxed it while
    // we were allocating the object above.
    ValueType value = FindBoxedLambda(closure);
    if (UNLIKELY(!value.IsNull())) {
      // Let the GC clean up method_as_object at a later time.
      return value.Read();
    }

    // Otherwise we should insert it into the hash map in this thread.
    map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
  }

  return method_as_object;
}

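// A minimal caller-side sketch (illustrative only; the accessor used to reach the
// BoxTable from the interpreter is an assumption here):
//
//   ArtMethod* closure = ...;  // produced earlier by create-lambda
//   mirror::Object* boxed =
//       Runtime::Current()->GetLambdaBoxTable()->BoxLambda(closure);
//   if (boxed == nullptr) {
//     // An exception (most likely an OOME) is pending on the current thread.
//   }
//
// Note that BoxLambda is a double-checked lookup: it probes the map under the lock,
// allocates the box outside the lock (allocation may suspend the thread), then
// re-probes under the lock in case another thread won the race.
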
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
  DCHECK(object != nullptr);
  *out_closure = nullptr;

  // Note that we do not need to hold lambda_table_lock_ here
  // since we never look at the map.

  mirror::Object* boxed_closure_object = object;

  // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
  if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
    ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
    return false;
  }

  // TODO(iam): We must check that the closure object extends/implements the type
  // specified in [type id]. This is not currently implemented since it's always a Method.

  // If we got this far, the inputs are valid.
  // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
  mirror::AbstractMethod* boxed_closure_as_method =
      down_cast<mirror::AbstractMethod*>(boxed_closure_object);

  ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
  DCHECK(unboxed_closure != nullptr);

  *out_closure = unboxed_closure;
  return true;
}

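// A matching caller-side sketch for the unbox path (illustrative only; as above,
// the accessor name is an assumption):
//
//   BoxTable::ClosureType unboxed_closure = nullptr;
//   if (!Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(boxed, &unboxed_closure)) {
//     // A ClassCastException is pending; the caller should propagate it.
//   }
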
BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
  auto map_iterator = map_.Find(closure);
  if (map_iterator != map_.end()) {
    const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
    const ValueType& value = key_value_pair.second;

    DCHECK(!value.IsNull());  // Never store null boxes.
    return value;
  }

  return ValueType(nullptr);
}

void BoxTable::BlockUntilWeaksAllowed() {
  Thread* self = Thread::Current();
  while (UNLIKELY(allow_new_weaks_ == false)) {
    new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
  }
}

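// Note: this mirrors the allow/disallow protocol used by the runtime's other weak-root
// containers (such as the intern table): while the GC is sweeping weak roots, any reader
// that could resurrect a weak entry must block here until sweeping completes.
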
void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
  DCHECK(visitor != nullptr);

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  /*
   * Visit every weak root in our lambda box table.
   * Remove unmarked objects, update marked objects to new address.
   */
  for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
    std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;

    const ValueType& old_value = key_value_pair.second;

    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
    mirror::Object* new_value = visitor->IsMarked(old_value_raw);

    if (new_value == nullptr) {
      // The object has been swept away.
      // Delete the entry from the map; Erase returns the iterator to the next entry.
      map_iterator = map_.Erase(map_iterator);
    } else {
      // The object has been moved.
      // Update the map.
      key_value_pair.second = ValueType(new_value);
      ++map_iterator;
    }
  }

  // Occasionally shrink the map to avoid growing very large.
  if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
    map_.ShrinkToMaximumLoad();
  }
}

void BoxTable::DisallowNewWeakBoxedLambdas() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = false;
}

void BoxTable::AllowNewWeakBoxedLambdas() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);

  allow_new_weaks_ = true;
  new_weaks_condition_.Broadcast(self);
}

void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::lambda_table_lock_);
  // New weak boxed lambdas must already be disallowed at this point (e.g. in a GC pause).
  CHECK_EQ(allow_new_weaks_, false);
}
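
// A sketch of the expected GC-side calling sequence (illustrative only; the exact call
// sites live in the runtime's system-weak handling):
//
//   box_table->DisallowNewWeakBoxedLambdas();   // entering the GC's weak-processing phase
//   box_table->SweepWeakBoxedLambdas(visitor);  // drop dead boxes, fix up moved ones
//   box_table->AllowNewWeakBoxedLambdas();      // done; wakes threads blocked in BoxLambda
//
// Any thread that calls BoxLambda in between blocks in BlockUntilWeaksAllowed() until
// the Broadcast issued by AllowNewWeakBoxedLambdas().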

bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
  // Nothing needs this right now, but leave this assertion for later when
  // we need to look at the references inside of the closure.
  if (kIsDebugBuild) {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }

  // TODO: Need rework to use read barriers once closures have references inside of them that
  // can move. Until then, it's safe to just compare the data inside of the closure directly.
  return lhs == rhs;
}

}  // namespace lambda
}  // namespace art