/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_

#include "atomic_integer.h"
#include "barrier.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
#include "offsets.h"
#include "root_visitor.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

class StackVisitor;
class Thread;

namespace gc {

namespace accounting {
template <typename T> class AtomicStack;
class MarkIfReachesAllocspaceVisitor;
class ModUnionClearCardVisitor;
class ModUnionVisitor;
class ModUnionTableBitmap;
class MarkStackChunk;
typedef AtomicStack<mirror::Object*> ObjectStack;
class SpaceBitmap;
}  // namespace accounting

namespace space {
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class MallocSpace;
}  // namespace space

class Heap;

namespace collector {

class SemiSpace : public GarbageCollector {
 public:
  explicit SemiSpace(Heap* heap, bool generational = false,
                     const std::string& name_prefix = "");

  ~SemiSpace() {}

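  // GarbageCollector phase hooks. SemiSpace is not a concurrent collector
  // (see IsConcurrent below), so these phases run while the mutators are
  // suspended.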
  virtual void InitializePhase();
  virtual bool IsConcurrent() const {
    return false;
  }
  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void MarkReachableObjects()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const {
    return kGcTypePartial;
  }

  // Sets which space we will be copying objects to.
  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);

  // Sets which space we will be copying objects from.
  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);

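  // Illustrative sketch (an assumption about how the heap drives this
  // collector, not code quoted from elsewhere; the variable names are
  // hypothetical): the caller selects the two spaces before each collection,
  //   semi_space->SetFromSpace(bump_pointer_space);
  //   semi_space->SetToSpace(temp_space);
  // and swaps them for the next cycle once the collection completes.
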
  // Initializes internal structures.
  void Init();

  // Find the default mark bitmap.
  void FindDefaultMarkBitmap();

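  // Marks an object. Under the semi-space scheme, a from-space object is
  // copied into the to-space and a forwarding address is installed in the old
  // copy; objects outside the from-space are marked in place.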
  // Returns the new address of the object.
  mirror::Object* MarkObject(mirror::Object* object)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

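  // Visits each reference field of obj, marking the objects it refers to and
  // updating the fields as needed.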
  void ScanObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Marks the root set at the start of a garbage collection.
  void MarkRoots()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Makes a space immune. Immune spaces have all of their live objects marked,
  // that is, the mark and live bitmaps are bound together.
  void ImmuneSpace(space::ContinuousSpace* space)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Bind the live bits to the mark bits of bitmaps for spaces that are never
  // collected, i.e. the image. Mark that portion of the heap as immune.
  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

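  // Undoes the bitmap bindings established by BindBitmaps().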
  void UnBindBitmaps()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

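  // Processes the soft, weak, finalizer and phantom references discovered
  // during marking.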
  void ProcessReferences(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sweeps unmarked objects to complete the garbage collection.
  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweeps unmarked objects in the large object space to complete the garbage collection.
  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Sweep only pointers within an array. WARNING: Trashes objects.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // TODO: enable thread safety analysis when in use by multiple worker threads.
  template <typename MarkVisitor>
  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
      NO_THREAD_SAFETY_ANALYSIS;

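  // Sweeps system weaks (for example interned strings and JNI weak globals),
  // clearing entries whose referents are no longer reachable and updating
  // those whose referents moved.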
  void SweepSystemWeaks()
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

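  // Visits the class pointer and every reference field of obj with the given
  // visitor.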
  template <typename Visitor>
  static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

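  // Root marking callback: marks root and returns its new address, which
  // differs from root only when the object was moved.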
  static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

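  // Marks the object and then drains the mark stack, so that everything
  // transitively reachable from it is marked before returning.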
  static mirror::Object* RecursiveMarkObjectCallback(mirror::Object* root, void* arg)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

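  // Copies a not-yet-forwarded from-space object to its new location (the
  // to-space or, in the generational mode, possibly the non-moving space when
  // it is promoted) and returns the new address.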
  virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

 protected:
  // Returns null if the object is not marked, otherwise returns the forwarding
  // address (same as the object itself for non-movable things).
  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;

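  // Function-pointer wrapper around GetMarkedForwardAddress() for callers
  // that take a callback.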
  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Marks a large object, returning true if the object was previously unmarked.
  bool MarkLargeObject(const mirror::Object* obj)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Resizes the mark stack to the given size; used to grow the stack
  // (typically to 2x its current size) when it overflows.
  void ResizeMarkStack(size_t new_size);

  // Returns true if we should sweep the space.
  virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;

  // Returns how many threads we should use for the current GC phase, based on
  // whether we are paused and whether or not we care about pauses.
  size_t GetThreadCount(bool paused) const;

  // Returns true if an object is inside of the immune region (assumed to be marked).
  bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
    return obj >= immune_begin_ && obj < immune_end_;
  }

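  // Returns true if the given space lies entirely within the immune region.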
  bool IsImmuneSpace(const space::ContinuousSpace* space) const;

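  // Root verification hooks, used when debugging root marking.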
  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                 const StackVisitor* visitor);

  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  template <typename Visitor>
  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                            const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit the header, static field references, and interface pointers of a class object.
  template <typename Visitor>
  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  template <typename Visitor>
  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
                                    const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visit all of the references in an object array.
  template <typename Visitor>
  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                         const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Visits the header and field references of a data object.
  template <typename Visitor>
  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                   const Visitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    return VisitInstanceFieldsReferences(klass, obj, visitor);
  }

  // Push an object onto the mark stack.
  inline void MarkStackPush(mirror::Object* obj);

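  // Updates the mod-union tables and marks the objects that dirty cards in
  // the immune spaces point to, so immune-space references into the collected
  // spaces are not missed.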
  void UpdateAndMarkModUnion()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Schedules an unmarked object for reference processing.
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Recursively blackens objects on the mark stack.
  void ProcessMarkStack(bool paused)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void EnqueueFinalizerReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void PreserveSomeSoftReferences(mirror::Object** ref)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ClearWhiteReferences(mirror::Object** list)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
                         mirror::Object** weak_references,
                         mirror::Object** finalizer_references,
                         mirror::Object** phantom_references)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

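  // Returns the forwarding address of a from-space object, or null if it has
  // not been forwarded yet. (In this collector the forwarding address is
  // assumed to be stashed in the object's lock word; see the definition.)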
  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;

  // Stack of objects that have been marked but not yet scanned.
  accounting::ObjectStack* mark_stack_;

  // Immune range; every object inside the immune range is assumed to be marked.
  mirror::Object* immune_begin_;
  mirror::Object* immune_end_;

  // If true, the large object space is immune.
  bool is_large_object_space_immune_;

  // Destination and source spaces (any type of ContinuousMemMapAllocSpace,
  // with or without a live bitmap).
  space::ContinuousMemMapAllocSpace* to_space_;
  accounting::SpaceBitmap* to_space_live_bitmap_;  // Cached live bitmap as an optimization.
  space::ContinuousMemMapAllocSpace* from_space_;

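  // The thread performing the collection.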
  Thread* self_;

  // When true, the generational mode (promotion and bump-pointer-space-only
  // collections) is enabled. TODO: move these to a new file as a new garbage
  // collector?
  bool generational_;

  // Used for the generational mode. The end/top of the bump pointer space at
  // the end of the last collection.
  byte* last_gc_to_space_end_;

  // Used for the generational mode. During a collection, keeps track of how
  // many bytes of objects have been copied so far from the bump pointer space
  // to the non-moving space.
  uint64_t bytes_promoted_;

  // Used for the generational mode. When true, collect the whole heap. When
  // false, collect only the bump pointer spaces.
  bool whole_heap_collection_;

  // Used for the generational mode. A counter used to enable
  // whole_heap_collection_ once per interval.
  int whole_heap_collection_interval_counter_;

  // Used for the generational mode. The default interval of the whole heap
  // collection. If N, the whole heap collection occurs every N collections.
  static constexpr int kDefaultWholeHeapCollectionInterval = 5;
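  // Illustrative sketch of the intended policy (an assumption drawn from the
  // field comments above, not code quoted from this class):
  //   if (generational_ && !whole_heap_collection_ &&
  //       ++whole_heap_collection_interval_counter_ >=
  //           kDefaultWholeHeapCollectionInterval) {
  //     whole_heap_collection_ = true;  // Next collection covers the whole heap.
  //     whole_heap_collection_interval_counter_ = 0;
  //   }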

 private:
  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_