lambda: Add support for invoke-interface for boxed innate lambdas
Lambda closures created with the 'create-lambda' instruction
(termed "innate lambdas") can be turned into an object with 'box-lambda'.
This CL enables those kinds of lambdas to work with 'invoke-interface'
by generating a proxy class that implements the lambda's interface.
Note: MIPS32/64 support not included.
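
For illustration, a rough sketch of the managed-code scenario this CL
enables (the 'Functional' interface and its 'invoke' method are
illustrative placeholders, not part of this CL); the comments show the
approximate dex-level operations:

  Functional f = () -> 5;          // vF = create-lambda
  Object o = f;                    // vO = box-lambda vF
                                   //      (o is an instance of a generated
                                   //       lambda proxy class implementing
                                   //       Functional)
  Functional g = (Functional) o;
  int r = g.invoke();              // invoke-interface now dispatches through
                                   //      the proxy to the lambda's target
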
Bug: 24618608
Bug: 25107649
Change-Id: Ic8f1bb66ebeaed4097e758a50becf1cff6ccaefb
diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc
index 6f9f8bb..0690cd1 100644
--- a/runtime/lambda/art_lambda_method.cc
+++ b/runtime/lambda/art_lambda_method.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "lambda/art_lambda_method.h"
#include "base/logging.h"
@@ -73,5 +74,12 @@
}
}
+size_t ArtLambdaMethod::GetArgumentVRegCount() const {
+ DCHECK(GetArtMethod()->IsStatic()); // The receiver is not in the shorty, so instance methods would be undercounted.
+ const char* method_shorty = GetArtMethod()->GetShorty();
+ DCHECK_NE(*method_shorty, '\0') << method_shorty;
+ return ShortyFieldType::CountVirtualRegistersRequired(method_shorty + 1); // skip return type
+}
+
} // namespace lambda
} // namespace art
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
index ea13eb7..a858bf9 100644
--- a/runtime/lambda/art_lambda_method.h
+++ b/runtime/lambda/art_lambda_method.h
@@ -90,6 +90,17 @@
return strlen(captured_variables_shorty_);
}
+ // Return the offset in bytes from the start of ArtLambdaMethod to the method_.
+ // -- Should only be used by assembly (stub) support code and compiled code.
+ static constexpr size_t GetArtMethodOffset() {
+ return offsetof(ArtLambdaMethod, method_);
+ }
+
+ // Calculate how many vregs all the arguments will use when doing an invoke.
+ // (Most primitives are 1 vreg, double/long are 2, a reference is 1, a lambda is 2).
+ // -- This is used to know how big a shadow frame to set up when invoking the target method.
+ size_t GetArgumentVRegCount() const SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
// TODO: ArtMethod, or at least the entry points should be inlined into this struct
// to avoid an extra indirect load when doing invokes.
diff --git a/runtime/lambda/box_class_table-inl.h b/runtime/lambda/box_class_table-inl.h
new file mode 100644
index 0000000..2fc34a7
--- /dev/null
+++ b/runtime/lambda/box_class_table-inl.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
+#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
+
+#include "lambda/box_class_table.h"
+#include "thread.h"
+
+namespace art {
+namespace lambda {
+
+template <typename Visitor>
+inline void BoxClassTable::VisitRoots(const Visitor& visitor) {
+ MutexLock mu(Thread::Current(), *Locks::lambda_class_table_lock_);
+ for (std::pair<UnorderedMapKeyType, ValueType>& key_value : map_) {
+ ValueType& gc_root = key_value.second;
+ visitor.VisitRoot(gc_root.AddressWithoutBarrier());
+ }
+}
+
+} // namespace lambda
+} // namespace art
+
+#endif // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
diff --git a/runtime/lambda/box_class_table.cc b/runtime/lambda/box_class_table.cc
new file mode 100644
index 0000000..1e49886
--- /dev/null
+++ b/runtime/lambda/box_class_table.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "lambda/box_class_table.h"
+
+#include "base/mutex.h"
+#include "common_throws.h"
+#include "gc_root-inl.h"
+#include "lambda/closure.h"
+#include "lambda/leaking_allocator.h"
+#include "mirror/method.h"
+#include "mirror/object-inl.h"
+#include "thread.h"
+
+#include <string>
+#include <vector>
+
+namespace art {
+namespace lambda {
+
+// Create the lambda proxy class given the name of the lambda interface (e.g. Ljava/lang/Runnable;).
+// Also needs the class loader (or null for the boot classpath) in which the proxy class will be
+// created.
+//
+// The class must **not** have already been created.
+// Returns a non-null ptr on success, otherwise returns null and has an exception set.
+static mirror::Class* CreateClass(Thread* self,
+ const std::string& class_name,
+ const Handle<mirror::ClassLoader>& class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedObjectAccessUnchecked soa(self);
+ StackHandleScope<2> hs(self);
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ // Find the java.lang.Class for our class name (from the class loader).
+ Handle<mirror::Class> lambda_interface =
+ hs.NewHandle(class_linker->FindClass(self, class_name.c_str(), class_loader));
+ // TODO: use LookupClass in a loop
+ // TODO: DCHECK that this doesn't actually cause the class to be loaded,
+ // since the create-lambda should've loaded it already
+ DCHECK(lambda_interface.Get() != nullptr) << "CreateClass with class_name=" << class_name;
+ DCHECK(lambda_interface->IsInterface()) << "CreateClass with class_name=" << class_name;
+ jobject lambda_interface_class = soa.AddLocalReference<jobject>(lambda_interface.Get());
+
+ // Look up java.lang.reflect.Proxy#getLambdaProxyClass method.
+ Handle<mirror::Class> java_lang_reflect_proxy =
+ hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/reflect/Proxy;"));
+ jclass java_lang_reflect_proxy_class =
+ soa.AddLocalReference<jclass>(java_lang_reflect_proxy.Get());
+ DCHECK(java_lang_reflect_proxy.Get() != nullptr);
+
+ jmethodID proxy_factory_method_id =
+ soa.Env()->GetStaticMethodID(java_lang_reflect_proxy_class,
+ "getLambdaProxyClass",
+ "(Ljava/lang/ClassLoader;Ljava/lang/Class;)Ljava/lang/Class;");
+ DCHECK(!soa.Env()->ExceptionCheck());
+
+ // Call into the java code to do the hard work of figuring out which methods and throws
+ // our lambda interface proxy needs to implement. It then calls back into the class linker
+ // on our behalf to make the proxy itself.
+ jobject generated_lambda_proxy_class =
+ soa.Env()->CallStaticObjectMethod(java_lang_reflect_proxy_class,
+ proxy_factory_method_id,
+ class_loader.ToJObject(),
+ lambda_interface_class);
+
+ // This can throw, in which case we return null. The caller must handle the pending exception.
+ return soa.Decode<mirror::Class*>(generated_lambda_proxy_class);
+}
+
+BoxClassTable::BoxClassTable() {
+}
+
+BoxClassTable::~BoxClassTable() {
+ // Don't need to do anything, classes are deleted automatically by GC
+ // when the classloader is deleted.
+ //
+ // Our table will not outlive the classloader since the classloader owns it.
+}
+
+mirror::Class* BoxClassTable::GetOrCreateBoxClass(const char* class_name,
+ const Handle<mirror::ClassLoader>& class_loader) {
+ DCHECK(class_name != nullptr);
+
+ Thread* self = Thread::Current();
+
+ std::string class_name_str = class_name;
+
+ {
+ MutexLock mu(self, *Locks::lambda_class_table_lock_);
+
+ // Attempt to look up this class, it's possible it was already created previously.
+ // If this is the case we *must* return the same class as before to maintain
+ // referential equality between box instances.
+ //
+ // In managed code:
+ // Functional f = () -> 5; // vF = create-lambda
+ // Object a = f; // vA = box-lambda vA
+ // Object b = f; // vB = box-lambda vB
+ // assert(a.getClass() == b.getClass())
+ // assert(a == b)
+ ValueType value = FindBoxedClass(class_name_str);
+ if (!value.IsNull()) {
+ return value.Read();
+ }
+ }
+
+ // Otherwise we need to generate a class ourselves and insert it into the hash map
+
+ // The table lock has been released above, which implicitly allows other threads to suspend
+ // (since the GC callbacks will not block on trying to acquire our lock).
+ // We also don't want to call into the class linker with the lock held because
+ // our lock level is lower.
+ self->AllowThreadSuspension();
+
+ // Create a lambda proxy class, within the specified class loader.
+ mirror::Class* lambda_proxy_class = CreateClass(self, class_name_str, class_loader);
+
+ // There are no thread suspension points after this, so we don't need to put it into a handle.
+ ScopedAssertNoThreadSuspension soants{self, "BoxClassTable::GetOrCreateBoxClass"}; // NOLINT: [readability/braces] [4]
+
+ if (UNLIKELY(lambda_proxy_class == nullptr)) {
+ // Most likely an OOM has occurred.
+ CHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+
+ {
+ MutexLock mu(self, *Locks::lambda_class_table_lock_);
+
+ // Possible, but unlikely, that someone already came in and made a proxy class
+ // on another thread.
+ ValueType value = FindBoxedClass(class_name_str);
+ if (UNLIKELY(!value.IsNull())) {
+ DCHECK_EQ(lambda_proxy_class, value.Read());
+ return value.Read();
+ }
+
+ // Otherwise we made a brand new proxy class.
+ // The class itself is cleaned up by the GC (e.g. class unloading) later.
+
+ // Actually insert into the table.
+ map_.Insert({std::move(class_name_str), ValueType(lambda_proxy_class)});
+ }
+
+ return lambda_proxy_class;
+}
+
+BoxClassTable::ValueType BoxClassTable::FindBoxedClass(const std::string& class_name) const {
+ auto map_iterator = map_.Find(class_name);
+ if (map_iterator != map_.end()) {
+ const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
+ const ValueType& value = key_value_pair.second;
+
+ DCHECK(!value.IsNull()); // Never store null boxes.
+ return value;
+ }
+
+ return ValueType(nullptr);
+}
+
+void BoxClassTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
+ item.first.clear();
+
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ item.second = ValueType(); // Also clear the GC root.
+}
+
+bool BoxClassTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
+ bool is_empty = item.first.empty();
+ DCHECK_EQ(item.second.IsNull(), is_empty);
+
+ return is_empty;
+}
+
+bool BoxClassTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
+ const UnorderedMapKeyType& rhs) const {
+ // Be damn sure the classes don't just move around from under us.
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+
+ // Note: each BoxClassTable is owned by a single class loader, so within one table the
+ // lambda interface's class name uniquely identifies its proxy class; comparing names suffices.
+ return lhs == rhs;
+}
+
+size_t BoxClassTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
+ return std::hash<std::string>()(key);
+}
+
+} // namespace lambda
+} // namespace art
diff --git a/runtime/lambda/box_class_table.h b/runtime/lambda/box_class_table.h
new file mode 100644
index 0000000..17e1026
--- /dev/null
+++ b/runtime/lambda/box_class_table.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
+#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
+
+#include "base/allocator.h"
+#include "base/hash_map.h"
+#include "gc_root.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "object_callbacks.h"
+
+#include <stdint.h>
+
+namespace art {
+
+class ArtMethod; // forward declaration
+template<class T> class Handle; // forward declaration
+
+namespace mirror {
+class Class; // forward declaration
+class ClassLoader; // forward declaration
+class LambdaProxy; // forward declaration
+class Object; // forward declaration
+} // namespace mirror
+
+namespace lambda {
+struct Closure; // forward declaration
+
+/*
+ * Store a table of lambda proxy classes, keyed by the lambda interface name. This is required
+ * to maintain referential equality of the proxy class (and thus of its boxes) when a lambda is re-boxed.
+ *
+ * Conceptually, we store a mapping of Class Name -> Strong Reference<Class>.
+ * When entries are erased, we shrink the underlying table to use less space.
+ */
+class BoxClassTable FINAL {
+ public:
+ // TODO: This should take an ArtLambdaMethod instead, and read the class name from that.
+ // Note: null class_loader means bootclasspath.
+ mirror::Class* GetOrCreateBoxClass(const char* class_name,
+ const Handle<mirror::ClassLoader>& class_loader)
+ REQUIRES(!Locks::lambda_class_table_lock_, !Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Visit the strong roots to the lambda proxy classes, updating their addresses
+ // if the class objects have been moved by the GC.
+ template <typename Visitor>
+ void VisitRoots(const Visitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS // for object marking requiring heap bitmap lock
+ REQUIRES(!Locks::lambda_class_table_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ BoxClassTable();
+ ~BoxClassTable();
+
+ private:
+ // We only store strong GC roots in our table.
+ using ValueType = GcRoot<mirror::Class>;
+
+ // Attempt to look up the class in the map, or return null if it's not there yet.
+ ValueType FindBoxedClass(const std::string& class_name) const
+ SHARED_REQUIRES(Locks::lambda_class_table_lock_);
+
+ // Store the key as a string so that we can have our own copy of the class name.
+ using UnorderedMapKeyType = std::string;
+
+ // EmptyFn implementation for art::HashMap
+ struct EmptyFn {
+ void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ // SHARED_REQUIRES(Locks::mutator_lock_);
+
+ bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
+ };
+
+ // HashFn implementation for art::HashMap
+ struct HashFn {
+ size_t operator()(const UnorderedMapKeyType& key) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ // SHARED_REQUIRES(Locks::mutator_lock_);
+ };
+
+ // EqualsFn implementation for art::HashMap
+ struct EqualsFn {
+ bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ // SHARED_REQUIRES(Locks::mutator_lock_);
+ };
+
+ using UnorderedMap = art::HashMap<UnorderedMapKeyType,
+ ValueType,
+ EmptyFn,
+ HashFn,
+ EqualsFn,
+ TrackingAllocator<std::pair<UnorderedMapKeyType, ValueType>,
+ kAllocatorTagLambdaProxyClassBoxTable>>;
+
+ // Map of strong GC roots (lambda interface name -> lambda proxy class)
+ UnorderedMap map_ GUARDED_BY(Locks::lambda_class_table_lock_);
+
+ // Shrink the map when we get below this load factor.
+ // (This is an arbitrary value that should be large enough to prevent aggressive map erases
+ // from shrinking the table too often.)
+ static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
+
+ DISALLOW_COPY_AND_ASSIGN(BoxClassTable);
+};
+
+} // namespace lambda
+} // namespace art
+
+#endif // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 9918bb7..0032d08 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,8 +18,10 @@
#include "base/mutex.h"
#include "common_throws.h"
#include "gc_root-inl.h"
+#include "lambda/box_class_table.h"
#include "lambda/closure.h"
#include "lambda/leaking_allocator.h"
+#include "mirror/lambda_proxy.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "thread.h"
@@ -28,12 +30,13 @@
namespace art {
namespace lambda {
-// Temporarily represent the lambda Closure as its raw bytes in an array.
-// TODO: Generate a proxy class for the closure when boxing the first time.
-using BoxedClosurePointerType = mirror::ByteArray*;
+// All closures are boxed into a subtype of LambdaProxy which implements the lambda's interface.
+using BoxedClosurePointerType = mirror::LambdaProxy*;
-static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
- return mirror::ByteArray::GetArrayClass();
+// Returns the base class for all boxed closures.
+// Note that concrete closure boxes are actually a subtype of mirror::LambdaProxy.
+static mirror::Class* GetBoxedClosureBaseClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangLambdaProxy);
}
namespace {
@@ -54,6 +57,14 @@
return closure;
}
};
+
+ struct DeleterForClosure {
+ void operator()(Closure* closure) const {
+ ClosureAllocator::Delete(closure);
+ }
+ };
+
+ using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>;
} // namespace
BoxTable::BoxTable()
@@ -75,7 +86,9 @@
}
}
-mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
+mirror::Object* BoxTable::BoxLambda(const ClosureType& closure,
+ const char* class_name,
+ mirror::ClassLoader* class_loader) {
Thread* self = Thread::Current();
{
@@ -91,7 +104,7 @@
// Functional f = () -> 5; // vF = create-lambda
// Object a = f; // vA = box-lambda vA
// Object b = f; // vB = box-lambda vB
- // assert(a == f)
+ // assert(a == b)
ValueType value = FindBoxedLambda(closure);
if (!value.IsNull()) {
return value.Read();
@@ -100,30 +113,62 @@
// Otherwise we need to box ourselves and insert it into the hash map
}
- // Release the lambda table lock here, so that thread suspension is allowed.
+ // Convert the Closure into a managed object instance whose supertype is java.lang.LambdaProxy.
- // Convert the Closure into a managed byte[] which will serve
- // as the temporary 'boxed' version of the lambda. This is good enough
- // to check all the basic object identities that a boxed lambda must retain.
- // It's also good enough to contain all the captured primitive variables.
-
- // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
// TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- BoxedClosurePointerType closure_as_array_object =
- mirror::ByteArray::Alloc(self, closure->GetSize());
+ StackHandleScope<2> hs{self}; // NOLINT: [readability/braces] [4]
- // There are no thread suspension points after this, so we don't need to put it into a handle.
+ Handle<mirror::ClassLoader> class_loader_handle = hs.NewHandle(class_loader);
- if (UNLIKELY(closure_as_array_object == nullptr)) {
+ // Release the lambda table lock here, so that thread suspension is allowed.
+ self->AllowThreadSuspension();
+
+ lambda::BoxClassTable* lambda_box_class_table;
+
+ // Find the lambda box class table; it lives in the system class loader when the class loader is null.
+ if (class_loader == nullptr) {
+ ScopedObjectAccessUnchecked soa(self);
+ mirror::ClassLoader* system_class_loader =
+ soa.Decode<mirror::ClassLoader*>(Runtime::Current()->GetSystemClassLoader());
+ lambda_box_class_table = system_class_loader->GetLambdaProxyCache();
+ } else {
+ lambda_box_class_table = class_loader_handle->GetLambdaProxyCache();
+ // OK: can't be deleted while we hold a handle to the class loader.
+ }
+ DCHECK(lambda_box_class_table != nullptr);
+
+ Handle<mirror::Class> closure_class(hs.NewHandle(
+ lambda_box_class_table->GetOrCreateBoxClass(class_name, class_loader_handle)));
+ if (UNLIKELY(closure_class.Get() == nullptr)) {
// Most likely an OOM has occurred.
- CHECK(self->IsExceptionPending());
+ self->AssertPendingException();
return nullptr;
}
- // Write the raw closure data into the byte[].
- closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t), // component size
- 0 /*index*/), // index
- closure_as_array_object->GetLength());
+ BoxedClosurePointerType closure_as_object = nullptr;
+ UniqueClosurePtr closure_table_copy;
+ // Create an instance of the class, and assign the pointer to the closure into it.
+ {
+ closure_as_object = down_cast<BoxedClosurePointerType>(closure_class->AllocObject(self));
+ if (UNLIKELY(closure_as_object == nullptr)) {
+ self->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ // Make a copy of the closure that we will store in the hash map.
+ // The proxy instance will also point to this same copy.
+ // Note that the closure pointer is cleaned up only after the proxy is GCd.
+ closure_table_copy.reset(ClosureAllocator::Allocate(closure->GetSize()));
+ closure_as_object->SetClosure(closure_table_copy.get());
+ }
+
+ // There are no thread suspension points after this, so we don't need to put it into a handle.
+ ScopedAssertNoThreadSuspension soants{self, // NOLINT: [whitespace/braces] [5]
+ "box lambda table - box lambda - no more suspensions"}; // NOLINT: [whitespace/braces] [5]
+
+ // Write the raw closure data into the proxy instance's copy of the closure.
+ closure->CopyTo(closure_table_copy.get(),
+ closure->GetSize());
// The method has been successfully boxed into an object, now insert it into the hash map.
{
@@ -134,24 +179,21 @@
// we were allocating the object before.
ValueType value = FindBoxedLambda(closure);
if (UNLIKELY(!value.IsNull())) {
- // Let the GC clean up method_as_object at a later time.
+ // Let the GC clean up closure_as_object at a later time.
+ // (We will not see this object when sweeping, it wasn't inserted yet.)
+ closure_as_object->SetClosure(nullptr);
return value.Read();
}
// Otherwise we need to insert it into the hash map in this thread.
- // Make a copy for the box table to keep, in case the closure gets collected from the stack.
- // TODO: GC may need to sweep for roots in the box table's copy of the closure.
- Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
- closure->CopyTo(closure_table_copy, closure->GetSize());
-
- // The closure_table_copy needs to be deleted by us manually when we erase it from the map.
+ // The closure_table_copy is deleted by us manually when we erase it from the map.
// Actually insert into the table.
- map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
+ map_.Insert({closure_table_copy.release(), ValueType(closure_as_object)});
}
- return closure_as_array_object;
+ return closure_as_object;
}
bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
@@ -165,29 +207,35 @@
mirror::Object* boxed_closure_object = object;
- // Raise ClassCastException if object is not instanceof byte[]
- if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
- ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
+ // Raise ClassCastException if object is not instanceof LambdaProxy
+ if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureBaseClass()))) {
+ ThrowClassCastException(GetBoxedClosureBaseClass(), boxed_closure_object->GetClass());
return false;
}
// TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a byte[].
+ // specified in [type id]. This is not currently implemented since the type id is unavailable.
// If we got this far, the inputs are valid.
- // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
- BoxedClosurePointerType boxed_closure_as_array =
+ // Shuffle the java.lang.LambdaProxy back into a raw closure, then allocate it, copy,
+ // and return it.
+ BoxedClosurePointerType boxed_closure =
down_cast<BoxedClosurePointerType>(boxed_closure_object);
- const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
+ DCHECK_ALIGNED(boxed_closure->GetClosure(), alignof(Closure));
+ const Closure* aligned_interior_closure = boxed_closure->GetClosure();
+ DCHECK(aligned_interior_closure != nullptr);
+
+ // TODO: we probably don't need to make a copy here later on, once there's GC support.
// Allocate a copy that can "escape" and copy the closure data into that.
Closure* unboxed_closure =
- LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
+ LeakingAllocator::MakeFlexibleInstance<Closure>(self, aligned_interior_closure->GetSize());
+ DCHECK_ALIGNED(unboxed_closure, alignof(Closure));
// TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
- memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
+ memcpy(unboxed_closure, aligned_interior_closure, aligned_interior_closure->GetSize());
- DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
+ DCHECK_EQ(unboxed_closure->GetSize(), aligned_interior_closure->GetSize());
*out_closure = unboxed_closure;
return true;
@@ -236,9 +284,10 @@
if (new_value == nullptr) {
// The object has been swept away.
- const ClosureType& closure = key_value_pair.first;
+ Closure* closure = key_value_pair.first;
// Delete the entry from the map.
+ // (Remove from the map first to avoid accessing a dangling pointer).
map_iterator = map_.Erase(map_iterator);
// Clean up the memory by deleting the closure.
@@ -290,7 +339,10 @@
}
bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
- return item.first == nullptr;
+ bool is_empty = item.first == nullptr;
+ DCHECK_EQ(item.second.IsNull(), is_empty);
+
+ return is_empty;
}
bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index adb7332..9dca6ab 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -30,6 +30,9 @@
class ArtMethod; // forward declaration
namespace mirror {
+class Class; // forward declaration
+class ClassLoader; // forward declaration
+class LambdaProxy; // forward declaration
class Object; // forward declaration
} // namespace mirror
@@ -48,8 +51,11 @@
using ClosureType = art::lambda::Closure*;
// Boxes a closure into an object. Returns null and throws an exception on failure.
- mirror::Object* BoxLambda(const ClosureType& closure)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
+ mirror::Object* BoxLambda(const ClosureType& closure,
+ const char* class_name,
+ mirror::ClassLoader* class_loader)
+ REQUIRES(!Locks::lambda_table_lock_, !Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Unboxes an object back into the lambda. Returns false and throws an exception on failure.
bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
@@ -128,7 +134,16 @@
TrackingAllocator<std::pair<ClosureType, ValueType>,
kAllocatorTagLambdaBoxTable>>;
+ using ClassMap = art::HashMap<std::string,
+ GcRoot<mirror::Class>,
+ EmptyFn,
+ HashFn,
+ EqualsFn,
+ TrackingAllocator<std::pair<std::string, GcRoot<mirror::Class>>,
+ kAllocatorTagLambdaProxyClassBoxTable>>;
+
UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
+ ClassMap classes_map_ GUARDED_BY(Locks::lambda_table_lock_);
bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
index 179e4ee..f935e04 100644
--- a/runtime/lambda/closure.cc
+++ b/runtime/lambda/closure.cc
@@ -20,9 +20,6 @@
#include "lambda/art_lambda_method.h"
#include "runtime/mirror/object_reference.h"
-static constexpr const bool kClosureSupportsReferences = false;
-static constexpr const bool kClosureSupportsGarbageCollection = false;
-
namespace art {
namespace lambda {
@@ -128,6 +125,10 @@
return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
}
+ArtLambdaMethod* Closure::GetLambdaInfo() const {
+ return const_cast<ArtLambdaMethod*>(lambda_info_);
+}
+
uint32_t Closure::GetHashCode() const {
// Start with a non-zero constant, a prime number.
uint32_t result = 17;
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
index 31ff194..38ec063 100644
--- a/runtime/lambda/closure.h
+++ b/runtime/lambda/closure.h
@@ -33,12 +33,52 @@
class ArtLambdaMethod; // forward declaration
class ClosureBuilder; // forward declaration
+// TODO: Remove these constants once closures are supported properly.
+
+// Does the lambda closure support containing references? If so, all the users of lambdas
+// must be updated to also support references.
+static constexpr const bool kClosureSupportsReferences = false;
+// Does the lambda closure support being garbage collected? If so, all the users of lambdas
+// must be updated to also support garbage collection.
+static constexpr const bool kClosureSupportsGarbageCollection = false;
+// Does the lambda closure support being garbage collected with a read barrier? If so,
+// all the users of the lambdas must also be updated to support read barrier GC.
+static constexpr const bool kClosureSupportsReadBarrier = false;
+
+// Is this closure being stored as a 'long' in shadow frames and the quick ABI?
+static constexpr const bool kClosureIsStoredAsLong = true;
+
+
+// Raw memory layout for the lambda closure.
+//
+// WARNING:
+// * This should only be used by the compiler and tests, as they need to take offsetof() of the raw fields.
+// * Runtime/interpreter should always access closures through a Closure pointer.
+struct ClosureStorage {
+ // Compile-time known lambda information such as the type descriptor and size.
+ ArtLambdaMethod* lambda_info_;
+
+ // A contiguous list of captured variables, and possibly the closure size.
+ // The runtime size can always be determined through GetSize().
+ union {
+ // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
+ uint8_t static_variables_[0];
+ struct {
+ // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
+ size_t size_; // The lambda_info_ and the size_ itself are also included as part of the size.
+ uint8_t variables_[0];
+ } dynamic_;
+ } captured_[0];
+ // captured_ will always consist of one array element at runtime.
+ // Set to [0] so that 'size_' is not counted in sizeof(Closure).
+};
+
// Inline representation of a lambda closure.
// Contains the target method and the set of packed captured variables as a copy.
//
// The closure itself is logically immutable, although in practice any object references
// it (recursively) contains can be moved and updated by the GC.
-struct PACKED(sizeof(ArtLambdaMethod*)) Closure {
+struct Closure : private ClosureStorage {
// Get the size of the Closure in bytes.
// This is necessary in order to allocate a large enough area to copy the Closure into.
// Do *not* copy the closure with memcpy, since references also need to get moved.
@@ -52,6 +92,9 @@
// Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
ArtMethod* GetTargetMethod() const;
+ // Get the static lambda info that never changes.
+ ArtLambdaMethod* GetLambdaInfo() const;
+
// Calculates the hash code. Value is recomputed each time.
uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
@@ -156,28 +199,15 @@
static size_t GetClosureSize(const uint8_t* closure);
///////////////////////////////////////////////////////////////////////////////////
-
- // Compile-time known lambda information such as the type descriptor and size.
- ArtLambdaMethod* lambda_info_;
-
- // A contiguous list of captured variables, and possibly the closure size.
- // The runtime size can always be determined through GetSize().
- union {
- // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
- uint8_t static_variables_[0];
- struct {
- // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
- size_t size_; // The lambda_info_ and the size_ itself is also included as part of the size.
- uint8_t variables_[0];
- } dynamic_;
- } captured_[0];
- // captured_ will always consist of one array element at runtime.
- // Set to [0] so that 'size_' is not counted in sizeof(Closure).
-
- friend class ClosureBuilder;
+ // NOTE: Actual fields are declared in ClosureStorage.
friend class ClosureTest;
};
+// ABI guarantees:
+// * Closure same size as a ClosureStorage
+// * ClosureStorage begins at the same point a Closure would begin.
+static_assert(sizeof(Closure) == sizeof(ClosureStorage), "Closure size must match ClosureStorage");
+
} // namespace lambda
} // namespace art
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
index 739e965..7b36042 100644
--- a/runtime/lambda/closure_builder.cc
+++ b/runtime/lambda/closure_builder.cc
@@ -75,7 +75,7 @@
if (LIKELY(is_dynamic_size_ == false)) {
// Write in the extra bytes to store the dynamic size the first time.
is_dynamic_size_ = true;
- size_ += sizeof(Closure::captured_[0].dynamic_.size_);
+ size_ += sizeof(ClosureStorage::captured_[0].dynamic_.size_);
}
// A closure may be sized dynamically, so always query it for the true size.
@@ -107,38 +107,40 @@
<< "number of variables captured at runtime does not match "
<< "number of variables captured at compile time";
- Closure* closure = new (memory) Closure;
- closure->lambda_info_ = target_method;
+ ClosureStorage* closure_storage = new (memory) ClosureStorage;
+ closure_storage->lambda_info_ = target_method;
- static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size");
+ static_assert(offsetof(ClosureStorage, captured_) == kInitialSize, "wrong initial size");
size_t written_size;
if (UNLIKELY(is_dynamic_size_)) {
// The closure size must be set dynamically (i.e. nested lambdas).
- closure->captured_[0].dynamic_.size_ = GetSize();
- size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_);
+ closure_storage->captured_[0].dynamic_.size_ = GetSize();
+ size_t header_size = offsetof(ClosureStorage, captured_[0].dynamic_.variables_);
DCHECK_LE(header_size, GetSize());
size_t variables_size = GetSize() - header_size;
written_size =
WriteValues(target_method,
- closure->captured_[0].dynamic_.variables_,
+ closure_storage->captured_[0].dynamic_.variables_,
header_size,
variables_size);
} else {
// The closure size is known statically (i.e. no nested lambdas).
DCHECK(GetSize() == target_method->GetStaticClosureSize());
- size_t header_size = offsetof(Closure, captured_[0].static_variables_);
+ size_t header_size = offsetof(ClosureStorage, captured_[0].static_variables_);
DCHECK_LE(header_size, GetSize());
size_t variables_size = GetSize() - header_size;
written_size =
WriteValues(target_method,
- closure->captured_[0].static_variables_,
+ closure_storage->captured_[0].static_variables_,
header_size,
variables_size);
}
- DCHECK_EQ(written_size, closure->GetSize());
+ // OK: The closure storage is guaranteed to have the same layout as a Closure.
+ Closure* closure = reinterpret_cast<Closure*>(closure_storage);
+ DCHECK_EQ(written_size, closure->GetSize());
return closure;
}
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
index 46ddaa9..54bb4d4 100644
--- a/runtime/lambda/shorty_field_type.h
+++ b/runtime/lambda/shorty_field_type.h
@@ -285,6 +285,39 @@
}
}
+ // Get the number of virtual registers necessary to represent this type as a stack local.
+ inline size_t GetVirtualRegisterCount() const {
+ if (IsPrimitiveNarrow()) {
+ return 1;
+ } else if (IsPrimitiveWide()) {
+ return 2;
+ } else if (IsObject()) {
+ return kObjectReferenceSize / sizeof(uint32_t);
+ } else if (IsLambda()) {
+ return 2;
+ } else {
+ DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'";
+ UNREACHABLE();
+ }
+ }
+
+ // Count how many virtual registers would be necessary in order to store this list of shorty
+ // field types.
+ static inline size_t CountVirtualRegistersRequired(const char* shorty) {
+ size_t size = 0;
+
+ while (shorty != nullptr && *shorty != '\0') {
+ // Each argument appends to the size.
+ ShortyFieldType shorty_field{*shorty}; // NOLINT [readability/braces] [4]
+
+ size += shorty_field.GetVirtualRegisterCount();
+
+ ++shorty;
+ }
+
+ return size;
+ }
+
// Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection.
inline operator decltype(kByte)() const {
return value_;
diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc
index 32bade9..430e39e 100644
--- a/runtime/lambda/shorty_field_type_test.cc
+++ b/runtime/lambda/shorty_field_type_test.cc
@@ -218,6 +218,56 @@
}
} // TEST_F
+TEST_F(ShortyFieldTypeTest, TestCalculateVRegSize) {
+ // Make sure the single calculation for each value is correct.
+ std::pair<size_t, char> expected_actual_single[] = {
+ // Primitives
+ { 1u, 'Z' },
+ { 1u, 'B' },
+ { 1u, 'C' },
+ { 1u, 'S' },
+ { 1u, 'I' },
+ { 1u, 'F' },
+ { 2u, 'J' },
+ { 2u, 'D' },
+ // Non-primitives
+ { 1u, 'L' },
+ { 2u, '\\' },
+ };
+
+ for (auto pair : expected_actual_single) {
+ SCOPED_TRACE(pair.second);
+ EXPECT_EQ(pair.first, ShortyFieldType(pair.second).GetVirtualRegisterCount());
+ }
+
+ // Make sure we are correctly calculating how many virtual registers a shorty descriptor takes.
+ std::pair<size_t, const char*> expected_actual[] = {
+ // Empty list
+ { 0u, "" },
+ // Primitives
+ { 1u, "Z" },
+ { 1u, "B" },
+ { 1u, "C" },
+ { 1u, "S" },
+ { 1u, "I" },
+ { 1u, "F" },
+ { 2u, "J" },
+ { 2u, "D" },
+ // Non-primitives
+ { 1u, "L" },
+ { 2u, "\\" },
+ // Multiple things at once:
+ { 10u, "ZBCSIFJD" },
+ { 5u, "LLSSI" },
+ { 6u, "LLL\\L" }
+ };
+
+ for (auto pair : expected_actual) {
+ SCOPED_TRACE(pair.second);
+ EXPECT_EQ(pair.first, ShortyFieldType::CountVirtualRegistersRequired(pair.second));
+ }
+} // TEST_F
+
// Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests.
template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum>
struct ShortyTypeCharacteristics {