/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_

#include "space.h"

#include <ostream>
#include "base/memory_tool.h"

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

class ZygoteSpace;

// TODO: Remove this #define macro.
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << #call << " failed for " << (what); \
    } \
  } while (false)
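
// Illustrative (hypothetical) use of CHECK_MEMORY_CALL above; it assumes a call that returns 0 on
// success and an error code on failure (page_begin, page_length and the "what" string are
// placeholders):
//
//   CHECK_MEMORY_CALL(posix_madvise, (page_begin, page_length, POSIX_MADV_DONTNEED), "trim");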

// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
 public:
  typedef void (*WalkCallback)(void* start, void* end, size_t num_bytes, void* callback_arg);

  SpaceType GetType() const {
    return kSpaceTypeMallocSpace;
  }

  // Allocate num_bytes allowing the underlying space to grow.
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
                                          size_t* bytes_allocated, size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) = 0;
  // Allocate num_bytes without allowing the underlying space to grow.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
  // Return the storage space required by obj. If usable_size isn't null then it is set to the
  // amount of the storage space that may be used by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
  virtual size_t Free(Thread* self, mirror::Object* ptr)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
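
  // Caller-side sketch (hypothetical, for exposition only; space, self and num_bytes are
  // placeholders): try a regular allocation first, then fall back to growing the space.
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   mirror::Object* obj = space->Alloc(self, num_bytes, &bytes_allocated, &usable_size,
  //                                      &bytes_tl_bulk_allocated);
  //   if (obj == nullptr) {
  //     obj = space->AllocWithGrowth(self, num_bytes, &bytes_allocated, &usable_size,
  //                                  &bytes_tl_bulk_allocated);
  //   }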

  // Returns the maximum number of bytes that could be allocated for the given size in bulk,
  // that is, the maximum value of the bytes_tl_bulk_allocated out param returned by
  // MallocSpace::Alloc().
  virtual size_t MaxBytesBulkAllocatedFor(size_t num_bytes) = 0;

#ifndef NDEBUG
  virtual void CheckMoreCoreForPrecondition() {}  // to be overridden in the debug build.
#else
  void CheckMoreCoreForPrecondition() {}  // no-op in the non-debug build.
#endif

  void* MoreCore(intptr_t increment);

  // Hands unused pages back to the system.
  virtual size_t Trim() = 0;

  // Performs an mspace_inspect_all which calls back for each allocation chunk. The chunk may not
  // be in use, indicated by num_bytes equaling zero.
  virtual void Walk(WalkCallback callback, void* arg) = 0;

  // Returns the number of bytes that the space has currently obtained from the system. This is
  // greater than or equal to the amount of live data in the space.
  virtual size_t GetFootprint() = 0;

  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
  virtual size_t GetFootprintLimit() = 0;

  // Sets the maximum number of bytes that the heap is allowed to obtain from the system via
  // MoreCore. Note that this is used to stop the mspace from growing all the way to Capacity:
  // when allocations fail, we GC before increasing the footprint limit and allowing the mspace
  // to grow.
  virtual void SetFootprintLimit(size_t limit) = 0;

  // Removes the fork-time growth limit on capacity, allowing the application to allocate up to
  // the maximum reserved size of the heap.
  void ClearGrowthLimit() {
    growth_limit_ = NonGrowthLimitCapacity();
  }

  // Override Capacity so that we only return the possibly limited capacity.
  size_t Capacity() const {
    return growth_limit_;
  }

  // The total amount of memory reserved for the alloc space.
  size_t NonGrowthLimitCapacity() const {
    return GetMemMap()->Size();
  }
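
  // Illustrative invariant (for exposition only):
  //   Capacity() == growth_limit_ <= NonGrowthLimitCapacity() == GetMemMap()->Size()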

  // Changes the non-growth-limit capacity by shrinking or expanding the map. Currently, only
  // shrinking is supported.
  void ClampGrowthLimit();

  void Dump(std::ostream& os) const;

  void SetGrowthLimit(size_t growth_limit);

  virtual MallocSpace* CreateInstance(MemMap* mem_map, const std::string& name, void* allocator,
                                      uint8_t* begin, uint8_t* end, uint8_t* limit,
                                      size_t growth_limit, bool can_move_objects) = 0;

  // Splits this space into a zygote space and a new malloc space which takes over our unused
  // memory. When true, the low memory mode argument specifies that the heap wishes the created
  // space to be more aggressive in releasing unused pages. Invalidates the space it's called on.
  ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
                                 MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
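
  // Illustrative (hypothetical) use of CreateZygoteSpace, roughly what the heap does at zygote
  // fork time; the space name and variables are placeholders:
  //
  //   MallocSpace* post_zygote_space = nullptr;
  //   ZygoteSpace* zygote_space =
  //       malloc_space->CreateZygoteSpace("alloc space", low_memory_mode, &post_zygote_space);
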
  virtual uint64_t GetBytesAllocated() = 0;
  virtual uint64_t GetObjectsAllocated() = 0;

  // Returns the class of a recently freed object.
  mirror::Class* FindRecentFreedObject(const mirror::Object* obj);

  bool CanMoveObjects() const OVERRIDE {
    return can_move_objects_;
  }

  void DisableMovingObjects() {
    can_move_objects_ = false;
  }

 protected:
  MallocSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
              uint8_t* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
              size_t starting_size, size_t initial_size);

  static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
                              size_t* growth_limit, size_t* capacity, uint8_t* requested_begin);

  // When true, the low memory mode argument specifies that the heap wishes the created allocator
  // to be more aggressive in releasing unused pages.
  virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
                                size_t maximum_size, bool low_memory_mode) = 0;

  virtual void RegisterRecentFree(mirror::Object* ptr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(lock_);

  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
    return &SweepCallback;
  }

  // Recently freed object buffer.
  static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
  static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
  std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
  size_t recent_free_pos_;

  static size_t bitmap_index_;

  // Used to ensure mutual exclusion when the allocation space's data structures are being
  // modified.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
  // limit is a value <= the mem_map_ capacity, used for ergonomic reasons because of the zygote.
  // Prior to forking the zygote, the heap will have a maximally sized mem_map_ but the
  // growth_limit_ will be set to a lower value. The growth_limit_ is used as the capacity of the
  // alloc_space_; however, unlike a normal capacity, which can't vary, the growth_limit_ can be
  // cleared once by a call to ClearGrowthLimit.
  size_t growth_limit_;

  // True if objects in the space are movable.
  bool can_move_objects_;

  // Starting and initial sizes, used when the space is reset.
  const size_t starting_size_;
  const size_t initial_size_;

 private:
  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_