/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_
#define ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_

#include "base/allocator.h"
#include "base/safe_map.h"
#include "base/tracking_safe_map.h"
#include "dlmalloc_space.h"
#include "space.h"
#include "thread-current-inl.h"

#include <set>
#include <vector>

namespace art {
namespace gc {
namespace space {

class AllocationInfo;

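// Selects the large object space implementation: disabled entirely, one memory map per object
// (LargeObjectMapSpace), or a single contiguous mapping managed with a free list (FreeListSpace).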
enum class LargeObjectSpaceType {
  kDisabled,
  kMap,
  kFreeList,
};

// Abstraction implemented by all large object spaces.
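// Example use (a sketch; assumes a gc::Heap* heap created with a large object space enabled and
// that Heap::GetLargeObjectsSpace() returns it):
//   space::LargeObjectSpace* los = heap->GetLargeObjectsSpace();
//   uint64_t live_bytes = los->GetBytesAllocated();           // Bytes currently allocated.
//   uint64_t lifetime_bytes = los->GetTotalBytesAllocated();  // Bytes ever allocated.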
class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
 public:
  SpaceType GetType() const override {
    return kSpaceTypeLargeObjectSpace;
  }
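  // Swaps the live and mark bitmaps of the space; used by the GC around the sweeping phase.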
  void SwapBitmaps();
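  // Copies the live bitmap bits into the mark bitmap.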
  void CopyLiveToMarked();
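  // Visits each large object chunk; implementations pass the chunk's start, end, and byte count
  // to the callback (mirroring DlMallocSpace::WalkCallback's signature).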
  virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
  virtual ~LargeObjectSpace() {}

  uint64_t GetBytesAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_bytes_allocated_;
  }
  uint64_t GetObjectsAllocated() override {
    MutexLock mu(Thread::Current(), lock_);
    return num_objects_allocated_;
  }
  uint64_t GetTotalBytesAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_bytes_allocated_;
  }
  uint64_t GetTotalObjectsAllocated() const {
    MutexLock mu(Thread::Current(), lock_);
    return total_objects_allocated_;
  }
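  // Frees a batch of num_ptrs objects at once; returns the total number of bytes freed.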
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
  // LargeObjectSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) override {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() override {
    return 0U;
  }
  bool IsAllocSpace() const override {
    return true;
  }
  AllocSpace* AsAllocSpace() override {
    return this;
  }
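  // Sweeps unmarked objects from the space; returns the number of objects and bytes freed.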
  collector::ObjectBytePair Sweep(bool swap_bitmaps);
  bool CanMoveObjects() const override {
    return false;
  }
  // Current address at which the space begins, which may vary as the space is filled.
  uint8_t* Begin() const {
    return begin_;
  }
  // Current address at which the space ends, which may vary as the space is filled.
  uint8_t* End() const {
    return end_;
  }
  // Current size of the space.
  size_t Size() const {
    return End() - Begin();
  }
  // Return true if we contain the specified address.
  bool Contains(const mirror::Object* obj) const override {
    const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
    return Begin() <= byte_obj && byte_obj < End();
  }
  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if the large object is a zygote large object. Potentially slow.
  virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
  // Called when we create the zygote space; marks all existing large objects as zygote large
  // objects.
  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;

  virtual void ForEachMemMap(std::function<void(const MemMap&)> func) const = 0;
  // GetBeginEndAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
  // End() from different allocations.
  virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;

 protected:
  explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
                            const char* lock_name);
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);

  // Used to ensure mutual exclusion when the allocation space's data structures,
  // including the allocation counters below, are being modified.
  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Number of bytes which have been allocated into the space and not yet freed. The count is also
  // included in the identically named field in Heap. Counts actual allocated sizes (after
  // rounding), not requested sizes. TODO: It would be cheaper to just maintain total allocated
  // and total freed counts.
  uint64_t num_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t num_objects_allocated_ GUARDED_BY(lock_);

  // Totals for large objects ever allocated, including those that have since been deallocated.
  // Never decremented.
  uint64_t total_bytes_allocated_ GUARDED_BY(lock_);
  uint64_t total_objects_allocated_ GUARDED_BY(lock_);

  // Begin and end addresses, which may change as more large objects are allocated.
  uint8_t* begin_;
  uint8_t* end_;

  friend class Space;

 private:
  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
};

// A discontinuous large object space implemented by individual mmap/munmap calls.
class LargeObjectMapSpace : public LargeObjectSpace {
 public:
  // Creates a large object space. Allocations into the large object space use memory maps instead
  // of malloc.
  static LargeObjectMapSpace* Create(const std::string& name);
  // Return the storage space required by obj.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override REQUIRES(!lock_);
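  // Allocates num_bytes in a dedicated memory map for the new object; returns null on failure.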
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated) override
      REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* ptr) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback, void* arg) override REQUIRES(!lock_);
  // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
  bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);

 protected:
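  // Per-allocation book-keeping: the backing memory map and whether the object is a zygote
  // large object.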
  struct LargeObject {
    MemMap mem_map;
    bool is_zygote;
  };
  explicit LargeObjectMapSpace(const std::string& name);
  virtual ~LargeObjectMapSpace() {}

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);

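  // Book-keeping for every live large object, keyed by object address.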
  AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
      GUARDED_BY(lock_);
};

// A continuous large object space with a free-list to handle holes.
class FreeListSpace final : public LargeObjectSpace {
 public:
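  // Allocations are page aligned; allocation sizes are rounded up to a multiple of kAlignment.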
  static constexpr size_t kAlignment = kPageSize;

  virtual ~FreeListSpace();
  static FreeListSpace* Create(const std::string& name, size_t capacity);
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
      REQUIRES(lock_);
  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      override REQUIRES(!lock_);
  size_t Free(Thread* self, mirror::Object* obj) override REQUIRES(!lock_);
  void Walk(DlMallocSpace::WalkCallback callback, void* arg) override REQUIRES(!lock_);
  void Dump(std::ostream& os) const override REQUIRES(!lock_);
  void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
  std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);

 protected:
  FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
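  // The space is divided into kAlignment-sized slots, each backed by an AllocationInfo entry in
  // the side table below; these helpers convert between addresses, slot indices, and entries.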
  size_t GetSlotIndexForAddress(uintptr_t address) const {
    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
  }
  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
  }
  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
  }
  // Removes a header from the free blocks set by finding its iterator and erasing it.
  void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);

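  // Comparator for the free block set below; it orders entries by the amount of free space
  // preceding them, so allocation can search for a suitably sized free block.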
  class SortByPrevFree {
   public:
    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
  };
  typedef std::set<AllocationInfo*, SortByPrevFree,
                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;

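  // The memory mapping backing the whole space.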
  MemMap mem_map_;
  // Side table for allocation info, one per page.
  MemMap allocation_info_map_;
  AllocationInfo* allocation_info_;

  // Free bytes at the end of the space. There is no footer for allocations at the end of the
  // space, so we keep track of how much free space there is at the end manually.
  size_t free_end_ GUARDED_BY(lock_);
  FreeBlocks free_blocks_ GUARDED_BY(lock_);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_LARGE_OBJECT_SPACE_H_