/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_

#include "space.h"

#include "base/mutex.h"

namespace art {

namespace mirror {
class Object;
}

namespace gc {

namespace collector {
class MarkSweep;
}  // namespace collector

namespace space {

Ian Rogers6fac4472014-02-25 17:01:10 -080038// A bump pointer space allocates by incrementing a pointer, it doesn't provide a free
39// implementation as its intended to be evacuated.
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010040class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
Mathieu Chartier590fee92013-09-13 13:46:47 -070041 public:
42 typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
43
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010044 SpaceType GetType() const override {
Mathieu Chartier590fee92013-09-13 13:46:47 -070045 return kSpaceTypeBumpPointerSpace;
46 }
47
48 // Create a bump pointer space with the requested sizes. The requested base address is not
49 // guaranteed to be granted, if it is required, the caller should call Begin on the returned
50 // space to confirm the request was granted.
Vladimir Marko11306592018-10-26 14:22:59 +010051 static BumpPointerSpace* Create(const std::string& name, size_t capacity);
Vladimir Markoc34bebf2018-08-16 16:12:49 +010052 static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap&& mem_map);
Mathieu Chartier590fee92013-09-13 13:46:47 -070053
Mathieu Chartier2cebb242015-04-21 16:50:40 -070054 // Allocate num_bytes, returns null if the space is full.
Ian Rogers6fac4472014-02-25 17:01:10 -080055 mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010056 size_t* usable_size, size_t* bytes_tl_bulk_allocated) override;
Mathieu Chartier0651d412014-04-29 14:37:57 -070057 // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
58 mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070059 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010060 override REQUIRES(Locks::mutator_lock_);
Mathieu Chartier0651d412014-04-29 14:37:57 -070061
Mathieu Chartier590fee92013-09-13 13:46:47 -070062 mirror::Object* AllocNonvirtual(size_t num_bytes);
Mathieu Chartier692fafd2013-11-29 17:24:40 -080063 mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes);
Mathieu Chartier590fee92013-09-13 13:46:47 -070064
65 // Return the storage space required by obj.
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010066 size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070067 REQUIRES_SHARED(Locks::mutator_lock_) {
Ian Rogers6fac4472014-02-25 17:01:10 -080068 return AllocationSizeNonvirtual(obj, usable_size);
69 }
Mathieu Chartier590fee92013-09-13 13:46:47 -070070
Mathieu Chartier692fafd2013-11-29 17:24:40 -080071 // NOPS unless we support free lists.
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010072 size_t Free(Thread*, mirror::Object*) override {
Mathieu Chartier590fee92013-09-13 13:46:47 -070073 return 0;
74 }
75
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010076 size_t FreeList(Thread*, size_t, mirror::Object**) override {
Ian Rogers6fac4472014-02-25 17:01:10 -080077 return 0;
Mathieu Chartier590fee92013-09-13 13:46:47 -070078 }
79
Ian Rogers6fac4472014-02-25 17:01:10 -080080 size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -070081 REQUIRES_SHARED(Locks::mutator_lock_);
Ian Rogers6fac4472014-02-25 17:01:10 -080082
Mathieu Chartier590fee92013-09-13 13:46:47 -070083 // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
84 // maximum reserved size of the heap.
85 void ClearGrowthLimit() {
86 growth_end_ = Limit();
87 }
88
89 // Override capacity so that we only return the possibly limited capacity
Yi Kong39402542019-03-24 02:47:16 -070090 size_t Capacity() const override {
Mathieu Chartier590fee92013-09-13 13:46:47 -070091 return growth_end_ - begin_;
92 }
93
94 // The total amount of memory reserved for the space.
Yi Kong39402542019-03-24 02:47:16 -070095 size_t NonGrowthLimitCapacity() const override {
Mathieu Chartier590fee92013-09-13 13:46:47 -070096 return GetMemMap()->Size();
97 }
98
Roland Levillainbbc6e7e2018-08-24 16:58:47 +010099 accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700100 return nullptr;
101 }
102
Roland Levillainbbc6e7e2018-08-24 16:58:47 +0100103 accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700104 return nullptr;
105 }
106
Mathieu Chartier31f44142014-04-08 14:40:03 -0700107 // Reset the space to empty.
Roland Levillainbbc6e7e2018-08-24 16:58:47 +0100108 void Clear() override REQUIRES(!block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700109
Yi Kong39402542019-03-24 02:47:16 -0700110 void Dump(std::ostream& os) const override;
Mathieu Chartier590fee92013-09-13 13:46:47 -0700111
Yi Kong39402542019-03-24 02:47:16 -0700112 size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!block_lock_);
113 size_t RevokeAllThreadLocalBuffers() override
Mathieu Chartier90443472015-07-16 20:32:27 -0700114 REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
115 void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
116 void AssertAllThreadLocalBuffersAreRevoked()
117 REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700118
Yi Kong39402542019-03-24 02:47:16 -0700119 uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700120 REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
Yi Kong39402542019-03-24 02:47:16 -0700121 uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700122 REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
Ian Rogers6fac4472014-02-25 17:01:10 -0800123 bool IsEmpty() const {
124 return Begin() == End();
125 }
126
Roland Levillainbbc6e7e2018-08-24 16:58:47 +0100127 bool CanMoveObjects() const override {
Mathieu Chartier31f44142014-04-08 14:40:03 -0700128 return true;
129 }
Mathieu Chartier590fee92013-09-13 13:46:47 -0700130
Yi Kong39402542019-03-24 02:47:16 -0700131 bool Contains(const mirror::Object* obj) const override {
Ian Rogers13735952014-10-08 12:43:28 -0700132 const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700133 return byte_obj >= Begin() && byte_obj < End();
134 }
135
136 // TODO: Change this? Mainly used for compacting to a particular region of memory.
Ian Rogers13735952014-10-08 12:43:28 -0700137 BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700138
139 // Return the object which comes after obj, while ensuring alignment.
140 static mirror::Object* GetNextObject(mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700141 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700142
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800143 // Allocate a new TLAB, returns false if the allocation failed.
Mathieu Chartier90443472015-07-16 20:32:27 -0700144 bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800145
Roland Levillainbbc6e7e2018-08-24 16:58:47 +0100146 BumpPointerSpace* AsBumpPointerSpace() override {
Mathieu Chartier7410f292013-11-24 13:17:35 -0800147 return this;
148 }
149
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800150 // Go through all of the blocks and visit the continuous objects.
Andreas Gampe351c4472017-07-12 19:32:55 -0700151 template <typename Visitor>
152 ALWAYS_INLINE void Walk(Visitor&& visitor)
Andreas Gampe0c183382017-07-13 22:26:24 -0700153 REQUIRES_SHARED(Locks::mutator_lock_)
154 REQUIRES(!block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800155
Roland Levillainbbc6e7e2018-08-24 16:58:47 +0100156 accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
Ian Rogers6fac4472014-02-25 17:01:10 -0800157
Mathieu Chartier52e4b432014-06-10 11:22:31 -0700158 // Record objects / bytes freed.
159 void RecordFree(int32_t objects, int32_t bytes) {
Hans Boehmfb8b4e22018-09-05 16:45:42 -0700160 objects_allocated_.fetch_sub(objects, std::memory_order_relaxed);
161 bytes_allocated_.fetch_sub(bytes, std::memory_order_relaxed);
Mathieu Chartier52e4b432014-06-10 11:22:31 -0700162 }
163
Roland Levillainbbc6e7e2018-08-24 16:58:47 +0100164 void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700165 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartierb363f662014-07-16 13:28:58 -0700166
Mathieu Chartier7410f292013-11-24 13:17:35 -0800167 // Object alignment within the space.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800168 static constexpr size_t kAlignment = 8;
169
Mathieu Chartier590fee92013-09-13 13:46:47 -0700170 protected:
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100171 BumpPointerSpace(const std::string& name, MemMap&& mem_map);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700172
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800173 // Allocate a raw block of bytes.
Mathieu Chartier90443472015-07-16 20:32:27 -0700174 uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
175 void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800176
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800177 // The main block is an unbounded block where objects go when there are no other blocks. This
178 // enables us to maintain tightly packed objects when you are not using thread local buffers for
Mathieu Chartierfc4c27e2014-02-11 11:05:41 -0800179 // allocation. The main block starts at the space Begin().
Mathieu Chartier90443472015-07-16 20:32:27 -0700180 void UpdateMainBlock() REQUIRES(block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700181
Ian Rogers13735952014-10-08 12:43:28 -0700182 uint8_t* growth_end_;
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800183 AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions.
184 AtomicInteger bytes_allocated_; // Accumulated from revoked thread local regions.
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800185 Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Mathieu Chartierfc4c27e2014-02-11 11:05:41 -0800186 // The objects at the start of the space are stored in the main block. The main block doesn't
187 // have a header, this lets us walk empty spaces which are mprotected.
188 size_t main_block_size_ GUARDED_BY(block_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800189 // The number of blocks in the space, if it is 0 then the space has one long continuous block
190 // which doesn't have an updated header.
191 size_t num_blocks_ GUARDED_BY(block_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700192
193 private:
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800194 struct BlockHeader {
195 size_t size_; // Size of the block in bytes, does not include the header.
196 size_t unused_; // Ensures alignment of kAlignment.
197 };
198
Andreas Gampe575e78c2014-11-03 23:41:03 -0800199 static_assert(sizeof(BlockHeader) % kAlignment == 0,
200 "continuous block must be kAlignment aligned");
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800201
Mathieu Chartier590fee92013-09-13 13:46:47 -0700202 friend class collector::MarkSweep;
203 DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
204};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_