/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_

#include "malloc_space.h"
#include "space.h"

namespace art {
namespace gc {

namespace collector {
class MarkSweep;
}  // namespace collector

namespace space {

// An alloc space is a space where objects may be allocated and garbage collected. Not final, as it
// may be overridden by a MemoryToolMallocSpace.
class DlMallocSpace : public MallocSpace {
 public:
  // Create a DlMallocSpace from an existing mem_map.
  static DlMallocSpace* CreateFromMemMap(MemMap&& mem_map,
                                         const std::string& name,
                                         size_t starting_size,
                                         size_t initial_size,
                                         size_t growth_limit,
                                         size_t capacity,
                                         bool can_move_objects);

  // Create a DlMallocSpace with the requested sizes. The requested base address is not
  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
  // space to confirm the request was granted.
  static DlMallocSpace* Create(const std::string& name,
                               size_t initial_size,
                               size_t growth_limit,
                               size_t capacity,
                               bool can_move_objects);

  // Virtual to allow MemoryToolMallocSpace to intercept.
  mirror::Object* AllocWithGrowth(Thread* self,
                                  size_t num_bytes,
                                  size_t* bytes_allocated,
                                  size_t* usable_size,
                                  size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_);
  // Virtual to allow MemoryToolMallocSpace to intercept.
  mirror::Object* Alloc(Thread* self,
                        size_t num_bytes,
                        size_t* bytes_allocated,
                        size_t* usable_size,
                        size_t* bytes_tl_bulk_allocated) override REQUIRES(!lock_) {
    return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
                           bytes_tl_bulk_allocated);
  }
  // Virtual to allow MemoryToolMallocSpace to intercept.
  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) override {
    return AllocationSizeNonvirtual(obj, usable_size);
  }
  // Virtual to allow MemoryToolMallocSpace to intercept.
  size_t Free(Thread* self, mirror::Object* ptr) override
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Virtual to allow MemoryToolMallocSpace to intercept.
  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

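  // A bulk allocation of num_bytes needs exactly num_bytes, since there is no per-thread
  // buffering to account for.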
  size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override {
    return num_bytes;
  }

  // DlMallocSpaces don't have thread local state.
  size_t RevokeThreadLocalBuffers(art::Thread*) override {
    return 0U;
  }
  size_t RevokeAllThreadLocalBuffers() override {
    return 0U;
  }

  // Faster non-virtual allocation path.
  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                  size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);

  // Faster non-virtual allocation size path.
  size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);

#ifndef NDEBUG
  // Override only in the debug build.
  void CheckMoreCoreForPrecondition() override;
#endif

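  // Expose the underlying dlmalloc mspace.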
  void* GetMspace() const {
    return mspace_;
  }

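  // Hands unused memory back to the system.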
  size_t Trim() override;

  // Perform an mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
  // in use, indicated by num_bytes equaling zero.
  void Walk(WalkCallback callback, void* arg) override REQUIRES(!lock_);

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  size_t GetFootprint() override;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  size_t GetFootprintLimit() override;

  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
  // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
  void SetFootprintLimit(size_t limit) override;

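  // Create a new space of this type backed by mem_map, reusing the given allocator state.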
  MallocSpace* CreateInstance(MemMap&& mem_map,
                              const std::string& name,
                              void* allocator,
                              uint8_t* begin,
                              uint8_t* end,
                              uint8_t* limit,
                              size_t growth_limit,
                              bool can_move_objects) override;

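  // The number of bytes and objects currently allocated in the space.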
  uint64_t GetBytesAllocated() override;
  uint64_t GetObjectsAllocated() override;

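  // Reset the space to its initial, empty state, releasing its memory back to the system.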
  void Clear() override;

  bool IsDlMallocSpace() const override {
    return true;
  }

  DlMallocSpace* AsDlMallocSpace() override {
    return this;
  }

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
      REQUIRES_SHARED(Locks::mutator_lock_);

 protected:
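  // Use the Create* factory functions above rather than invoking this constructor directly.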
  DlMallocSpace(MemMap&& mem_map,
                size_t initial_size,
                const std::string& name,
                void* mspace,
                uint8_t* begin,
                uint8_t* end,
                uint8_t* limit,
                size_t growth_limit,
                bool can_move_objects,
                size_t starting_size);

 private:
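  // Allocate without growing the footprint limit; the caller must hold lock_.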
  mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated)
      REQUIRES(lock_);

  void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                        size_t /*maximum_size*/, bool /*low_memory_mode*/) override {
    return CreateMspace(base, morecore_start, initial_size);
  }
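  // Create the underlying dlmalloc mspace that manages the given memory region.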
  static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);

  // The boundary tag overhead.
  static const size_t kChunkOverhead = sizeof(intptr_t);

  // Underlying malloc space.
  void* mspace_;

  friend class collector::MarkSweep;

  DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_