blob: b69bd911623816dc1056befb093c5a68fcad4e07 [file] [log] [blame]
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
18#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -070019
Mathieu Chartierbad02672014-08-25 13:08:22 -070020#include "base/allocator.h"
David Sehr67bf42e2018-02-26 16:43:04 -080021#include "base/safe_map.h"
22#include "base/tracking_safe_map.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070023#include "dlmalloc_space.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070024#include "space.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080025
26#include <set>
27#include <vector>
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -070028
29namespace art {
Ian Rogers1d54e732013-05-02 21:10:01 -070030namespace gc {
31namespace space {
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -070032
Mathieu Chartieraf4edbd2014-09-08 17:42:48 -070033class AllocationInfo;
34
// Selects which large object space implementation (if any) the heap uses.
enum class LargeObjectSpaceType {
  kDisabled,  // No large object space.
  kMap,       // LargeObjectMapSpace: one memory map per object.
  kFreeList,  // FreeListSpace: one contiguous region managed with a free list.
};
40
// Abstraction implemented by all large object spaces.
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const OVERRIDE {
    return kSpaceTypeLargeObjectSpace;
  }
  // Swap the bitmaps of this space (defined in the .cc file).
  void SwapBitmaps();
  // Copy live bitmap contents to the mark bitmap (defined in the .cc file).
  void CopyLiveToMarked();
  // Visit every large object in the space, invoking the callback on each.
  // Pure virtual: each subclass supplies its own iteration.
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  // Bytes currently allocated in this space.
  uint64_t GetBytesAllocated() OVERRIDE {
    return num_bytes_allocated_;
  }
  // Objects currently allocated in this space.
  uint64_t GetObjectsAllocated() OVERRIDE {
    return num_objects_allocated_;
  }
  // Cumulative bytes ever allocated (not decremented on free).
  uint64_t GetTotalBytesAllocated() const {
    return total_bytes_allocated_;
  }
  // Cumulative objects ever allocated (not decremented on free).
  uint64_t GetTotalObjectsAllocated() const {
    return total_objects_allocated_;
  }
  // Bulk free of num_ptrs objects given in ptrs (AllocSpace interface).
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
  // LargeObjectSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() OVERRIDE {
    return 0U;
  }
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }
  // Sweep the space; returns the reclaimed object/byte counts as an ObjectBytePair.
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  // Large objects are never moved by the GC.
  virtual bool CanMoveObjects() const OVERRIDE {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of space
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the large object is a zygote large object. Potentially slow.
  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
  // Called when we create the zygote space, mark all existing large objects as zygote large
  // objects.
  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;

  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
  // End() from different allocations.
  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
  // Callback used when sweeping; frees the dead objects handed in by the collector.
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Approximate number of bytes which have been allocated into the space.
  uint64_t num_bytes_allocated_;
  // Approximate number of objects which have been allocated into the space.
  uint64_t num_objects_allocated_;
  // Cumulative allocation totals over the lifetime of the space.
  uint64_t total_bytes_allocated_;
  uint64_t total_objects_allocated_;
  // Begin and end, may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};
130
// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_);
  // Allocate num_bytes for obj; out-params receive the bytes actually allocated, the usable
  // size, and the thread-local bulk-allocated bytes.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  // Free a single object previously returned by Alloc; returns the bytes freed.
  size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;

  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);

 protected:
  // Per-object bookkeeping: the memory map backing the object, and whether the
  // object is a zygote large object.
  struct LargeObject {
    MemMap mem_map;
    bool is_zygote;
  };
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_);
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);

  // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Maps each allocated object to its bookkeeping entry; guarded by lock_.
  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
      GUARDED_BY(lock_);
};
165
// A continuous large object space with a free-list to handle holes.
class FreeListSpace FINAL : public LargeObjectSpace {
 public:
  // Allocations are carved out of the space in page-sized slots.
  static constexpr size_t kAlignment = kPageSize;

  virtual ~FreeListSpace();
  // Create a free-list space of the given capacity, optionally at requested_begin.
  static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity);
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
      REQUIRES(lock_);
  // Allocate num_bytes; out-params receive the bytes actually allocated, the usable
  // size, and the thread-local bulk-allocated bytes.
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_);
  void Dump(std::ostream& os) const REQUIRES(!lock_);

  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const OVERRIDE REQUIRES(!lock_);

 protected:
  FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
  // Convert an address inside the space to its kAlignment-sized slot index.
  size_t GetSlotIndexForAddress(uintptr_t address) const {
    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
  }
  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
  // Look up the side-table entry for a given address (const and non-const flavors).
  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
  // Inverse of GetSlotIndexForAddress: slot index back to its address.
  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
  }
  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
  }
  // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
  void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE;
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_);

  // Comparator for the free-blocks set; ordering defined in the .cc file.
  class SortByPrevFree {
   public:
    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
  };
  typedef std::set<AllocationInfo*, SortByPrevFree,
                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;

  // There is no footer for any allocations at the end of the space, so we keep track of how much
  // free space there is at the end manually (see free_end_ below).
  MemMap mem_map_;
  // Side table for allocation info, one per page.
  MemMap allocation_info_map_;
  // Entry array of the side table — presumably backed by allocation_info_map_; confirm in .cc.
  AllocationInfo* allocation_info_;

  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // Free bytes at the end of the space.
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};
223
Ian Rogers1d54e732013-05-02 21:10:01 -0700224} // namespace space
225} // namespace gc
226} // namespace art
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700227
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700228#endif // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_