/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_SPACE_H_

#include <memory>
#include <string>

#include "base/atomic.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "gc/accounting/space_bitmap.h"
#include "gc/collector/object_byte_pair.h"
#include "runtime_globals.h"

namespace art {
namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

class Heap;

namespace space {

class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class DiscontinuousSpace;
class MallocSpace;
class DlMallocSpace;
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
class RegionSpace;
class ZygoteSpace;

static constexpr bool kDebugSpaces = kIsDebugBuild;

// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects in this space are retained forever and never collected.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
  // collections won't scan these areas, such as the Zygote.
  kGcRetentionPolicyFullCollect,
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
enum SpaceType {
  kSpaceTypeImageSpace,
  kSpaceTypeMallocSpace,
  kSpaceTypeZygoteSpace,
  kSpaceTypeBumpPointerSpace,
  kSpaceTypeLargeObjectSpace,
  kSpaceTypeRegionSpace,
};
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);

// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy of when objects associated with this space are collected.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, alloc, zygote, or large object.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, i.e. one backed by a memory-mapped image file?
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  ImageSpace* AsImageSpace();
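
  // Illustrative usage sketch (hypothetical caller, not part of this class): the Is*/As* pairs
  // above and below form a checked-downcast idiom; callers test the concrete kind before casting
  // instead of using dynamic_cast, e.g.
  //
  //   void InspectSpace(Space* space) {
  //     if (space->IsImageSpace()) {
  //       ImageSpace* image_space = space->AsImageSpace();
  //       // ... image-space-specific handling ...
  //     }
  //   }
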
  // Is this a malloc backed allocation space (dlmalloc or rosalloc)?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  bool IsRegionSpace() const {
    return GetType() == kSpaceTypeRegionSpace;
  }
  virtual RegionSpace* AsRegionSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

 protected:
  // When should objects within this space be reclaimed? Not constant, as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);

// AllocSpace interface.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation succeeds, the output parameter
  // bytes_allocated will be set to the number of actually allocated bytes, which is >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  //
  // bytes_tl_bulk_allocated - bytes allocated in bulk ahead of time for a thread-local allocation,
  // if applicable. It can be
  // 1) equal to bytes_allocated if it's not a thread-local allocation,
  // 2) greater than bytes_allocated if it's a thread-local allocation that required a new buffer,
  //    or
  // 3) zero if it's a thread-local allocation in an existing buffer.
  // This is what is to be added to Heap::num_bytes_allocated_.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
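
  // Illustrative caller sketch (hypothetical, for documentation only): an allocation path passes
  // all three out-parameters and feeds bytes_tl_bulk_allocated into the heap accounting, e.g.
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = alloc_space->Alloc(self, num_bytes, &bytes_allocated,
  //                                            &usable_size, &bytes_tl_bulk_allocated);
  //   if (obj != nullptr) {
  //     // Add bytes_tl_bulk_allocated (not bytes_allocated) to Heap::num_bytes_allocated_; it is
  //     // zero when the request was satisfied from an existing thread-local buffer.
  //   }
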
  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size,
                                            size_t* bytes_tl_bulk_allocated)
      REQUIRES(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Returns how many bytes were freed.
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  // Returns the total free bytes in the revoked thread-local runs that are to be subtracted
  // from Heap::num_bytes_allocated_, or zero if unnecessary.
  virtual size_t RevokeAllThreadLocalBuffers() = 0;
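
  // Illustrative sketch (hypothetical caller, not part of this interface): a collector revokes
  // the buffers at a pause or checkpoint and credits the unused space back to the heap counter:
  //
  //   size_t reclaimed = alloc_space->RevokeAllThreadLocalBuffers();
  //   // The caller subtracts 'reclaimed' from Heap::num_bytes_allocated_.
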
  virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;

 protected:
  struct SweepCallbackContext {
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    const bool swap_bitmaps;
    space::Space* const space;
    Thread* const self;
    collector::ObjectBytePair freed;
  };

  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};

// Continuous spaces have bitmaps and an address range. Although not required, objects within
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
 public:
  // Address at which the space begins.
  uint8_t* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_.load(std::memory_order_relaxed);
  }

  // The end of the address range covered by the space.
  uint8_t* Limit() const {
    return limit_;
  }

  // Change the end of the space. Be careful with use since changing the end of a space to an
  // invalid value may break the GC.
  void SetEnd(uint8_t* end) {
    end_.store(end, std::memory_order_relaxed);
  }

  void SetLimit(uint8_t* limit) {
    limit_ = limit;
  }

  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }

  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;

  // Maximum size to which the mapped space can grow.
  virtual size_t Capacity() const {
    return Limit() - Begin();
  }
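
  // Address-range summary (derived from the accessors above): Begin() <= End() <= Limit().
  // Size() = End() - Begin() is the portion currently in use, while Capacity() =
  // Limit() - Begin() is the maximum the space can grow to:
  //
  //   Begin()               End()                    Limit()
  //     |----- used (Size) ----|----- still growable ----|
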
  // Is object within this space? We check to see if the pointer is beyond the end first, as
  // continuous spaces are iterated over from low to high.
  bool HasAddress(const mirror::Object* obj) const {
    const uint8_t* byte_ptr = reinterpret_cast<const uint8_t*>(obj);
    return byte_ptr >= Begin() && byte_ptr < Limit();
  }

  bool Contains(const mirror::Object* obj) const {
    return HasAddress(obj);
  }

  virtual bool IsContinuousSpace() const {
    return true;
  }

  virtual ~ContinuousSpace() {}

 protected:
  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
                  uint8_t* begin, uint8_t* end, uint8_t* limit) :
      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
  }

  // The beginning of the storage for fast access.
  uint8_t* begin_;

  // Current end of the space.
  Atomic<uint8_t*> end_;

  // Limit of the space.
  uint8_t* limit_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousSpace);
};

// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects, so the write barrier shouldn't be triggered. This
// is suitable for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  accounting::LargeObjectBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  accounting::LargeObjectBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  bool IsDiscontinuousSpace() const override {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
  std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
};

class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

  MemMap* GetMemMap() {
    return &mem_map_;
  }

  const MemMap* GetMemMap() const {
    return &mem_map_;
  }

  MemMap ReleaseMemMap() {
    return std::move(mem_map_);
  }

 protected:
  MemMapSpace(const std::string& name,
              MemMap&& mem_map,
              uint8_t* begin,
              uint8_t* end,
              uint8_t* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(std::move(mem_map)) {
  }

  // Underlying storage of the space.
  MemMap mem_map_;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
};

// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
 public:
  bool IsAllocSpace() const override {
    return true;
  }
  AllocSpace* AsAllocSpace() override {
    return this;
  }

  bool IsContinuousMemMapAllocSpace() const override {
    return true;
  }
  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() override {
    return this;
  }

  bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_);
  // Make the mark bitmap an alias of the live bitmap. Save the current mark bitmap into
  // `temp_bitmap_`, so that we can restore it later in ContinuousMemMapAllocSpace::UnBindBitmaps.
  void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_);
  // Unalias the mark bitmap from the live bitmap and restore the old mark bitmap.
  void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();
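
  // Illustrative collector-side sketch (hypothetical ordering, for documentation only): a
  // collection that wants the mark bitmap aliased to the live bitmap brackets its work roughly as
  //
  //   space->BindLiveToMarkBitmap();  // mark bitmap now aliases the live bitmap
  //   // ... marking work, possibly space->SwapBitmaps() for concurrent sweeping ...
  //   space->UnBindBitmaps();         // restore the original mark bitmap saved in temp_bitmap_
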
  // Clear the space back to an empty space.
  virtual void Clear() = 0;

  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
    return live_bitmap_.get();
  }

  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
    return mark_bitmap_.get();
  }

  accounting::ContinuousSpaceBitmap* GetTempBitmap() const {
    return temp_bitmap_.get();
  }

  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;

 protected:
  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
  std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;

  ContinuousMemMapAllocSpace(const std::string& name,
                             MemMap&& mem_map,
                             uint8_t* begin,
                             uint8_t* end,
                             uint8_t* limit,
                             GcRetentionPolicy gc_retention_policy)
      : MemMapSpace(name, std::move(mem_map), begin, end, limit, gc_retention_policy) {
  }

 private:
  friend class gc::Heap;
  DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousMemMapAllocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_SPACE_H_