Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2011 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 17 | #ifndef ART_RUNTIME_GC_SPACE_SPACE_H_ |
| 18 | #define ART_RUNTIME_GC_SPACE_SPACE_H_ |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 19 | |
Ian Rogers | 700a402 | 2014-05-19 16:49:03 -0700 | [diff] [blame] | 20 | #include <memory> |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 21 | #include <string> |
| 22 | |
David Sehr | c431b9d | 2018-03-02 12:01:51 -0800 | [diff] [blame] | 23 | #include "base/atomic.h" |
Andreas Gampe | 7fbc4a5 | 2018-11-28 08:26:47 -0800 | [diff] [blame] | 24 | #include "base/locks.h" |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 25 | #include "base/macros.h" |
David Sehr | 79e2607 | 2018-04-06 17:58:50 -0700 | [diff] [blame] | 26 | #include "base/mem_map.h" |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 27 | #include "gc/accounting/space_bitmap.h" |
Andreas Gampe | d490129 | 2017-05-30 18:41:34 -0700 | [diff] [blame] | 28 | #include "gc/collector/object_byte_pair.h" |
Andreas Gampe | 5a0430d | 2019-01-04 14:33:57 -0800 | [diff] [blame] | 29 | #include "runtime_globals.h" |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 30 | |
| 31 | namespace art { |
| 32 | namespace mirror { |
Igor Murashkin | 2ffb703 | 2017-11-08 13:35:21 -0800 | [diff] [blame] | 33 | class Object; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 34 | } // namespace mirror |
| 35 | |
| 36 | namespace gc { |
| 37 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 38 | class Heap; |
| 39 | |
| 40 | namespace space { |
| 41 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 42 | class AllocSpace; |
Mathieu Chartier | 7410f29 | 2013-11-24 13:17:35 -0800 | [diff] [blame] | 43 | class BumpPointerSpace; |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 44 | class ContinuousMemMapAllocSpace; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 45 | class ContinuousSpace; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 46 | class DiscontinuousSpace; |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 47 | class MallocSpace; |
| 48 | class DlMallocSpace; |
| 49 | class RosAllocSpace; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 50 | class ImageSpace; |
| 51 | class LargeObjectSpace; |
Hiroshi Yamauchi | 2cd334a | 2015-01-09 14:03:35 -0800 | [diff] [blame] | 52 | class RegionSpace; |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 53 | class ZygoteSpace; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 54 | |
Mathieu Chartier | 0f72e41 | 2013-09-06 16:40:01 -0700 | [diff] [blame] | 55 | static constexpr bool kDebugSpaces = kIsDebugBuild; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 56 | |
// See Space::GetGcRetentionPolicy.
// Note: enumerator order is part of the interface; do not reorder.
enum GcRetentionPolicy {
  // Objects are retained forever with this policy for a space.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
  // collections won't scan these areas such as the Zygote.
  kGcRetentionPolicyFullCollect,
};
// Pretty-printer for logging/debug output; defined in the corresponding .cc file.
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
| 68 | |
// Discriminator returned by Space::GetType(), used by the Is*/As* downcast helpers below.
// Note: enumerator order is part of the interface; do not reorder.
enum SpaceType {
  // Space backed by a memory-mapped image file (boot/app image).
  kSpaceTypeImageSpace,
  // Malloc-backed allocation space (dlmalloc or rosalloc).
  kSpaceTypeMallocSpace,
  // Space allocated into by the Zygote and no longer used for allocation.
  kSpaceTypeZygoteSpace,
  // Bump-pointer allocated space.
  kSpaceTypeBumpPointerSpace,
  // Discontinuous space holding large objects.
  kSpaceTypeLargeObjectSpace,
  // Region-based space.
  kSpaceTypeRegionSpace,
};
// Pretty-printer for logging/debug output; defined in the corresponding .cc file.
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
| 78 | |
// A space contains memory allocated for managed objects.
// Abstract base of all heap spaces. Provides the name/retention-policy state and a family of
// Is*/As* predicates and checked downcasts keyed off GetType() or virtual overrides.
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy of when objects are collected associated with this space.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this: image, alloc, zygote, large object.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, ie one backed by a memory mapped image file.
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  ImageSpace* AsImageSpace();

  // Is this a malloc-backed allocation space (covers both dlmalloc and rosalloc variants)?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  // Is this specifically a dlmalloc-backed malloc space? Overridden by DlMallocSpace.
  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  // Is this specifically a rosalloc-backed malloc space? Overridden by RosAllocSpace.
  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no-longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  // Is this a region-based space?
  bool IsRegionSpace() const {
    return GetType() == kSpaceTypeRegionSpace;
  }
  virtual RegionSpace* AsRegionSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  // Does this space have a contiguous address range (see ContinuousSpace)?
  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  // Is this a discontinuous space (see DiscontinuousSpace)?
  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  // Does this space implement the AllocSpace allocation interface?
  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  // Is this both continuous, mem-map backed, and an alloc space?
  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  // Mutable so the Heap can e.g. demote a space's policy after the Zygote fork.
  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

 protected:
  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
};
| 192 | |
// AllocSpace interface.
// Pure-virtual mixin implemented by spaces that support object allocation and freeing.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation
  // succeeds, the output parameter bytes_allocated will be set to the
  // actually allocated bytes which is >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  //
  // bytes_tl_bulk_allocated - bytes allocated in bulk ahead of time for a thread local allocation,
  // if applicable. It can be
  // 1) equal to bytes_allocated if it's not a thread local allocation,
  // 2) greater than bytes_allocated if it's a thread local
  //    allocation that required a new buffer, or
  // 3) zero if it's a thread local allocation in an existing
  //    buffer.
  // This is what is to be added to Heap::num_bytes_allocated_.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;

  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  // Default implementation simply forwards to the thread-safe Alloc.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated)
      REQUIRES(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread local runs that's to be subtracted
  // from Heap::num_bytes_allocated_ or zero if unnecessary.
  virtual size_t RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread local runs that's to be subtracted
  // from Heap::num_bytes_allocated_ or zero if unnecessary.
  virtual size_t RevokeAllThreadLocalBuffers() = 0;

  // Log diagnostic detail about why an allocation of failed_alloc_bytes did not succeed.
  virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;

 protected:
  // Context passed to per-object sweep callbacks; accumulates freed object/byte counts.
  struct SweepCallbackContext {
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    // Whether the live and mark bitmaps were swapped before sweeping.
    const bool swap_bitmaps;
    // The space being swept.
    space::Space* const space;
    // The thread performing the sweep.
    Thread* const self;
    // Running total of objects/bytes freed during the sweep.
    collector::ObjectBytePair freed;
  };

  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};
| 263 | |
| 264 | // Continuous spaces have bitmaps, and an address range. Although not required, objects within |
| 265 | // continuous spaces can be marked in the card table. |
| 266 | class ContinuousSpace : public Space { |
| 267 | public: |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 268 | // Address at which the space begins. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 269 | uint8_t* Begin() const { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 270 | return begin_; |
| 271 | } |
| 272 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 273 | // Current address at which the space ends, which may vary as the space is filled. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 274 | uint8_t* End() const { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 275 | return end_.load(std::memory_order_relaxed); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 276 | } |
| 277 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 278 | // The end of the address range covered by the space. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 279 | uint8_t* Limit() const { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 280 | return limit_; |
| 281 | } |
| 282 | |
| 283 | // Change the end of the space. Be careful with use since changing the end of a space to an |
| 284 | // invalid value may break the GC. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 285 | void SetEnd(uint8_t* end) { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 286 | end_.store(end, std::memory_order_relaxed); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 287 | } |
| 288 | |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 289 | void SetLimit(uint8_t* limit) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 290 | limit_ = limit; |
| 291 | } |
| 292 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 293 | // Current size of space |
| 294 | size_t Size() const { |
| 295 | return End() - Begin(); |
| 296 | } |
| 297 | |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 298 | virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0; |
| 299 | virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 300 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 301 | // Maximum which the mapped space can grow to. |
| 302 | virtual size_t Capacity() const { |
| 303 | return Limit() - Begin(); |
| 304 | } |
| 305 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 306 | // Is object within this space? We check to see if the pointer is beyond the end first as |
| 307 | // continuous spaces are iterated over from low to high. |
| 308 | bool HasAddress(const mirror::Object* obj) const { |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 309 | const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 310 | return byte_ptr >= Begin() && byte_ptr < Limit(); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 311 | } |
| 312 | |
| 313 | bool Contains(const mirror::Object* obj) const { |
| 314 | return HasAddress(obj); |
| 315 | } |
| 316 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 317 | virtual bool IsContinuousSpace() const { |
| 318 | return true; |
| 319 | } |
| 320 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 321 | virtual ~ContinuousSpace() {} |
| 322 | |
| 323 | protected: |
| 324 | ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy, |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 325 | uint8_t* begin, uint8_t* end, uint8_t* limit) : |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 326 | Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 327 | } |
| 328 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 329 | // The beginning of the storage for fast access. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 330 | uint8_t* begin_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 331 | |
| 332 | // Current end of the space. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 333 | Atomic<uint8_t*> end_; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 334 | |
| 335 | // Limit of the space. |
Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 336 | uint8_t* limit_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 337 | |
| 338 | private: |
Mathieu Chartier | 3130cdf | 2015-05-03 15:20:23 -0700 | [diff] [blame] | 339 | DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousSpace); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 340 | }; |
| 341 | |
// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
// is suitable for use for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  // Bitmap tracking live objects in this space.
  accounting::LargeObjectBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Bitmap tracking marked objects in this space.
  accounting::LargeObjectBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  bool IsDiscontinuousSpace() const override {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  // Owned live/mark bitmaps backing the accessors above.
  std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
  std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
};
| 370 | |
// A continuous space whose storage is backed by a single MemMap that the space owns.
class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

  // Non-owning access to the underlying mapping.
  MemMap* GetMemMap() {
    return &mem_map_;
  }

  const MemMap* GetMemMap() const {
    return &mem_map_;
  }

  // Transfers ownership of the mapping to the caller; leaves mem_map_ moved-from.
  MemMap ReleaseMemMap() {
    return std::move(mem_map_);
  }

 protected:
  MemMapSpace(const std::string& name,
              MemMap&& mem_map,
              uint8_t* begin,
              uint8_t* end,
              uint8_t* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(std::move(mem_map)) {
  }

  // Underlying storage of the space
  MemMap mem_map_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
};
| 408 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 409 | // Used by the heap compaction interface to enable copying from one type of alloc space to another. |
| 410 | class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { |
| 411 | public: |
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 412 | bool IsAllocSpace() const override { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 413 | return true; |
| 414 | } |
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 415 | AllocSpace* AsAllocSpace() override { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 416 | return this; |
| 417 | } |
| 418 | |
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 419 | bool IsContinuousMemMapAllocSpace() const override { |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 420 | return true; |
| 421 | } |
Yi Kong | 3940254 | 2019-03-24 02:47:16 -0700 | [diff] [blame^] | 422 | ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() override { |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 423 | return this; |
| 424 | } |
| 425 | |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 426 | bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_); |
Roland Levillain | 8f7ea9a | 2018-01-26 17:27:59 +0000 | [diff] [blame] | 427 | // Make the mark bitmap an alias of the live bitmap. Save the current mark bitmap into |
| 428 | // `temp_bitmap_`, so that we can restore it later in ContinuousMemMapAllocSpace::UnBindBitmaps. |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 429 | void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_); |
Roland Levillain | 8f7ea9a | 2018-01-26 17:27:59 +0000 | [diff] [blame] | 430 | // Unalias the mark bitmap from the live bitmap and restore the old mark bitmap. |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 431 | void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_); |
Mathieu Chartier | 1f3b535 | 2014-02-03 14:00:42 -0800 | [diff] [blame] | 432 | // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping. |
| 433 | void SwapBitmaps(); |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 434 | |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 435 | // Clear the space back to an empty space. |
Ian Rogers | 6fac447 | 2014-02-25 17:01:10 -0800 | [diff] [blame] | 436 | virtual void Clear() = 0; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 437 | |
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 438 | accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override { |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 439 | return live_bitmap_.get(); |
| 440 | } |
Ian Rogers | 6fac447 | 2014-02-25 17:01:10 -0800 | [diff] [blame] | 441 | |
Roland Levillain | bbc6e7e | 2018-08-24 16:58:47 +0100 | [diff] [blame] | 442 | accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override { |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 443 | return mark_bitmap_.get(); |
| 444 | } |
| 445 | |
Lokesh Gidra | 10d0c96 | 2019-03-07 22:40:36 +0000 | [diff] [blame] | 446 | accounting::ContinuousSpaceBitmap* GetTempBitmap() const { |
| 447 | return temp_bitmap_.get(); |
| 448 | } |
| 449 | |
Mathieu Chartier | 10fb83a | 2014-06-15 15:15:43 -0700 | [diff] [blame] | 450 | collector::ObjectBytePair Sweep(bool swap_bitmaps); |
Mathieu Chartier | a8e8f9c | 2014-04-09 14:51:05 -0700 | [diff] [blame] | 451 | virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0; |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 452 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 453 | protected: |
Ian Rogers | 700a402 | 2014-05-19 16:49:03 -0700 | [diff] [blame] | 454 | std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_; |
| 455 | std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_; |
| 456 | std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_; |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 457 | |
Vladimir Marko | c34bebf | 2018-08-16 16:12:49 +0100 | [diff] [blame] | 458 | ContinuousMemMapAllocSpace(const std::string& name, |
| 459 | MemMap&& mem_map, |
| 460 | uint8_t* begin, |
| 461 | uint8_t* end, |
| 462 | uint8_t* limit, |
| 463 | GcRetentionPolicy gc_retention_policy) |
| 464 | : MemMapSpace(name, std::move(mem_map), begin, end, limit, gc_retention_policy) { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 465 | } |
| 466 | |
| 467 | private: |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 468 | friend class gc::Heap; |
Mathieu Chartier | 3130cdf | 2015-05-03 15:20:23 -0700 | [diff] [blame] | 469 | DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousMemMapAllocSpace); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 470 | }; |
| 471 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 472 | } // namespace space |
| 473 | } // namespace gc |
| 474 | } // namespace art |
| 475 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 476 | #endif // ART_RUNTIME_GC_SPACE_SPACE_H_ |