/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
18#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
19
20#include "space.h"
21
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -070022#include <iostream>
Hiroshi Yamauchi7cb7bbc2013-11-18 17:27:37 -080023#include <valgrind.h>
24#include <memcheck/memcheck.h>
25
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070026namespace art {
27namespace gc {
28
29namespace collector {
30 class MarkSweep;
31} // namespace collector
32
33namespace space {
34
Mathieu Chartiera1602f22014-01-13 17:19:19 -080035class ZygoteSpace;
36
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070037// TODO: Remove define macro
38#define CHECK_MEMORY_CALL(call, args, what) \
39 do { \
40 int rc = call args; \
41 if (UNLIKELY(rc != 0)) { \
42 errno = rc; \
43 PLOG(FATAL) << # call << " failed for " << what; \
44 } \
45 } while (false)
46
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070047// A common parent of DlMallocSpace and RosAllocSpace.
48class MallocSpace : public ContinuousMemMapAllocSpace {
49 public:
50 typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
51
52 SpaceType GetType() const {
Mathieu Chartiera1602f22014-01-13 17:19:19 -080053 return kSpaceTypeMallocSpace;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070054 }
55
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070056 // Allocate num_bytes allowing the underlying space to grow.
Ian Rogers6fac4472014-02-25 17:01:10 -080057 virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
58 size_t* bytes_allocated, size_t* usable_size) = 0;
59 // Allocate num_bytes without allowing the underlying space to grow.
60 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
61 size_t* usable_size) = 0;
62 // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
63 // amount of the storage space that may be used by obj.
64 virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
Ian Rogersef7d42f2014-01-06 12:55:46 -080065 virtual size_t Free(Thread* self, mirror::Object* ptr)
66 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
67 virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
68 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070069
70#ifndef NDEBUG
71 virtual void CheckMoreCoreForPrecondition() {} // to be overridden in the debug build.
72#else
73 void CheckMoreCoreForPrecondition() {} // no-op in the non-debug build.
74#endif
75
76 void* MoreCore(intptr_t increment);
77
78 // Hands unused pages back to the system.
79 virtual size_t Trim() = 0;
80
81 // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
82 // in use, indicated by num_bytes equaling zero.
83 virtual void Walk(WalkCallback callback, void* arg) = 0;
84
85 // Returns the number of bytes that the space has currently obtained from the system. This is
86 // greater or equal to the amount of live data in the space.
87 virtual size_t GetFootprint() = 0;
88
89 // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
90 virtual size_t GetFootprintLimit() = 0;
91
92 // Set the maximum number of bytes that the heap is allowed to obtain from the system via
93 // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
94 // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
95 virtual void SetFootprintLimit(size_t limit) = 0;
96
97 // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
98 // maximum reserved size of the heap.
99 void ClearGrowthLimit() {
100 growth_limit_ = NonGrowthLimitCapacity();
101 }
102
103 // Override capacity so that we only return the possibly limited capacity
104 size_t Capacity() const {
105 return growth_limit_;
106 }
107
108 // The total amount of memory reserved for the alloc space.
109 size_t NonGrowthLimitCapacity() const {
110 return GetMemMap()->Size();
111 }
112
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700113 void Dump(std::ostream& os) const;
114
115 void SetGrowthLimit(size_t growth_limit);
116
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700117 virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
Mathieu Chartier31f44142014-04-08 14:40:03 -0700118 byte* begin, byte* end, byte* limit, size_t growth_limit,
119 bool can_move_objects) = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700120
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800121 // Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
122 // the low memory mode argument specifies that the heap wishes the created space to be more
123 // aggressive in releasing unused pages. Invalidates the space its called on.
124 ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
125 MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700126 virtual uint64_t GetBytesAllocated() = 0;
127 virtual uint64_t GetObjectsAllocated() = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700128
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700129 // Returns the class of a recently freed object.
130 mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
131
Mathieu Chartier31f44142014-04-08 14:40:03 -0700132 bool CanMoveObjects() const OVERRIDE {
133 return can_move_objects_;
134 }
135
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700136 protected:
137 MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
Mathieu Chartier31f44142014-04-08 14:40:03 -0700138 byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
139 size_t starting_size, size_t initial_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700140
141 static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
142 size_t* growth_limit, size_t* capacity, byte* requested_begin);
143
Ian Rogers6fac4472014-02-25 17:01:10 -0800144 // When true the low memory mode argument specifies that the heap wishes the created allocator to
145 // be more aggressive in releasing unused pages.
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -0800146 virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -0800147 size_t maximum_size, bool low_memory_mode) = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700148
Mathieu Chartier661974a2014-01-09 11:23:53 -0800149 virtual void RegisterRecentFree(mirror::Object* ptr)
Ian Rogersef7d42f2014-01-06 12:55:46 -0800150 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
151 EXCLUSIVE_LOCKS_REQUIRED(lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700152
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -0700153 virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800154 return &SweepCallback;
155 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700156
157 // Recent allocation buffer.
158 static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
159 static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
160 std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
161 size_t recent_free_pos_;
162
163 static size_t bitmap_index_;
164
165 // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
166 Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
167
168 // The capacity of the alloc space until such time that ClearGrowthLimit is called.
169 // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
170 // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
171 // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
172 // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
173 // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
174 // one time by a call to ClearGrowthLimit.
175 size_t growth_limit_;
176
Mathieu Chartier31f44142014-04-08 14:40:03 -0700177 // True if objects in the space are movable.
178 const bool can_move_objects_;
179
180 // Starting and initial sized, used when you reset the space.
181 const size_t starting_size_;
182 const size_t initial_size_;
183
Hiroshi Yamauchie5eedcb2013-11-18 11:55:39 -0800184 private:
Ian Rogersef7d42f2014-01-06 12:55:46 -0800185 static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
186 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800187
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700188 DISALLOW_COPY_AND_ASSIGN(MallocSpace);
189};
190
191} // namespace space
192} // namespace gc
193} // namespace art
194
Hiroshi Yamauchie5eedcb2013-11-18 11:55:39 -0800195#endif // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_