/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
#define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_

#include "space.h"

#include <iostream>
#include <valgrind.h>
#include <memcheck/memcheck.h>

namespace art {
namespace gc {

namespace collector {
  class MarkSweep;
}  // namespace collector

namespace space {

class ZygoteSpace;

// TODO: Remove define macro
// Invokes `call args` (a C-library style call returning 0 on success) and, on a
// non-zero return code, stores the code into errno and aborts with a fatal log
// message naming the call and `what`.
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)

Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070047// A common parent of DlMallocSpace and RosAllocSpace.
48class MallocSpace : public ContinuousMemMapAllocSpace {
49 public:
50 typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
51
52 SpaceType GetType() const {
Mathieu Chartiera1602f22014-01-13 17:19:19 -080053 return kSpaceTypeMallocSpace;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070054 }
55
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070056 // Allocate num_bytes allowing the underlying space to grow.
Ian Rogers6fac4472014-02-25 17:01:10 -080057 virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
58 size_t* bytes_allocated, size_t* usable_size) = 0;
59 // Allocate num_bytes without allowing the underlying space to grow.
60 virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
61 size_t* usable_size) = 0;
62 // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
63 // amount of the storage space that may be used by obj.
64 virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
Ian Rogersef7d42f2014-01-06 12:55:46 -080065 virtual size_t Free(Thread* self, mirror::Object* ptr)
66 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
67 virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
68 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070069
70#ifndef NDEBUG
71 virtual void CheckMoreCoreForPrecondition() {} // to be overridden in the debug build.
72#else
73 void CheckMoreCoreForPrecondition() {} // no-op in the non-debug build.
74#endif
75
76 void* MoreCore(intptr_t increment);
77
78 // Hands unused pages back to the system.
79 virtual size_t Trim() = 0;
80
81 // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
82 // in use, indicated by num_bytes equaling zero.
83 virtual void Walk(WalkCallback callback, void* arg) = 0;
84
85 // Returns the number of bytes that the space has currently obtained from the system. This is
86 // greater or equal to the amount of live data in the space.
87 virtual size_t GetFootprint() = 0;
88
89 // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
90 virtual size_t GetFootprintLimit() = 0;
91
92 // Set the maximum number of bytes that the heap is allowed to obtain from the system via
93 // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
94 // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
95 virtual void SetFootprintLimit(size_t limit) = 0;
96
97 // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
98 // maximum reserved size of the heap.
99 void ClearGrowthLimit() {
100 growth_limit_ = NonGrowthLimitCapacity();
101 }
102
103 // Override capacity so that we only return the possibly limited capacity
104 size_t Capacity() const {
105 return growth_limit_;
106 }
107
108 // The total amount of memory reserved for the alloc space.
109 size_t NonGrowthLimitCapacity() const {
110 return GetMemMap()->Size();
111 }
112
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700113 void Dump(std::ostream& os) const;
114
115 void SetGrowthLimit(size_t growth_limit);
116
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700117 virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
Mathieu Chartier31f44142014-04-08 14:40:03 -0700118 byte* begin, byte* end, byte* limit, size_t growth_limit,
119 bool can_move_objects) = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700120
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800121 // Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
122 // the low memory mode argument specifies that the heap wishes the created space to be more
123 // aggressive in releasing unused pages. Invalidates the space its called on.
124 ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
125 MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700126 virtual uint64_t GetBytesAllocated() = 0;
127 virtual uint64_t GetObjectsAllocated() = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700128
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700129 // Returns the class of a recently freed object.
130 mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
131
Mathieu Chartier31f44142014-04-08 14:40:03 -0700132 bool CanMoveObjects() const OVERRIDE {
133 return can_move_objects_;
134 }
135
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -0700136 virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
137
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700138 protected:
139 MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
Mathieu Chartier31f44142014-04-08 14:40:03 -0700140 byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
141 size_t starting_size, size_t initial_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700142
143 static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
144 size_t* growth_limit, size_t* capacity, byte* requested_begin);
145
Ian Rogers6fac4472014-02-25 17:01:10 -0800146 // When true the low memory mode argument specifies that the heap wishes the created allocator to
147 // be more aggressive in releasing unused pages.
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -0800148 virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -0800149 size_t maximum_size, bool low_memory_mode) = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700150
Mathieu Chartier661974a2014-01-09 11:23:53 -0800151 virtual void RegisterRecentFree(mirror::Object* ptr)
Ian Rogersef7d42f2014-01-06 12:55:46 -0800152 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
153 EXCLUSIVE_LOCKS_REQUIRED(lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700154
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -0700155 virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800156 return &SweepCallback;
157 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700158
159 // Recent allocation buffer.
160 static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
161 static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
162 std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
163 size_t recent_free_pos_;
164
165 static size_t bitmap_index_;
166
167 // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
168 Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
169
170 // The capacity of the alloc space until such time that ClearGrowthLimit is called.
171 // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
172 // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
173 // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
174 // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
175 // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
176 // one time by a call to ClearGrowthLimit.
177 size_t growth_limit_;
178
Mathieu Chartier31f44142014-04-08 14:40:03 -0700179 // True if objects in the space are movable.
180 const bool can_move_objects_;
181
182 // Starting and initial sized, used when you reset the space.
183 const size_t starting_size_;
184 const size_t initial_size_;
185
Hiroshi Yamauchie5eedcb2013-11-18 11:55:39 -0800186 private:
Ian Rogersef7d42f2014-01-06 12:55:46 -0800187 static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
188 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Mathieu Chartiera1602f22014-01-13 17:19:19 -0800189
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700190 DISALLOW_COPY_AND_ASSIGN(MallocSpace);
191};

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_