/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include "base/logging.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
#include "os.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

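// Swap the live and mark object sets. Presumably called by the collector once
// marking is complete, so that the newly marked set becomes the live set.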
void LargeObjectSpace::SwapBitmaps() {
  live_objects_.swap(mark_objects_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  return new LargeObjectMapSpace(name);
}

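// Each allocation gets its own anonymous mem map, so freed objects can be
// returned to the OS individually rather than pooled.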
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
                                         PROT_READ | PROT_WRITE);
  if (mem_map == NULL) {
    return NULL;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
  MutexLock mu(Thread::Current(), lock_);
  MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  return found->second->Size();
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

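// Visit each mem map as one [Begin, End) region. The trailing
// callback(NULL, NULL, 0, arg) call appears to mark the end of a region, in
// the style of dlmalloc's heap-walk callbacks.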
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

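// Some callers may already hold lock_ exclusively, so only acquire it here
// when this thread does not own it, avoiding self-deadlock on the mutex.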
bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

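// The space is divided into kAlignment-sized slots. chunks_ keeps one Chunk
// header per slot, plus one extra entry for the sentinel ("dummy") chunk
// below, so GetNextChunk never walks off the end of the array.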
FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  chunks_.resize(Size() / kAlignment + 1);
  // Add a dummy chunk so we don't need to handle chunks having no next chunk.
  chunks_.back().SetSize(kAlignment, false);
  // Start out with one large free chunk.
  AddFreeChunk(begin_, end_ - begin_, NULL);
}

FreeListSpace::~FreeListSpace() {}

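// Mark the chunk at the given address as free, link it to its neighbors, and
// make it findable by size through free_chunks_.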
void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
  Chunk* chunk = ChunkFromAddr(address);
  chunk->SetSize(size, true);
  chunk->SetPrevious(previous);
  Chunk* next_chunk = GetNextChunk(chunk);
  next_chunk->SetPrevious(chunk);
  free_chunks_.insert(chunk);
}

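// Addresses and chunk headers convert both ways because the space and the
// chunks_ array are indexed in the same kAlignment units.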
FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
  size_t offset = reinterpret_cast<byte*>(address) - Begin();
  DCHECK(IsAligned<kAlignment>(offset));
  DCHECK_LT(offset, Size());
  return &chunks_[offset / kAlignment];
}

void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
  return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
}

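// free_chunks_ is ordered by chunk size, so distinct chunks of equal size
// compare equivalent; equal_range plus a pointer comparison is needed to
// erase exactly this chunk and not another chunk of the same size.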
void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
  // TODO: C++0x
  // TODO: Improve performance, this might be slow.
  std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
  for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
    if (*it == chunk) {
      free_chunks_.erase(it);
      return;
    }
  }
}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
    if (!chunk->IsFree()) {
      size_t size = chunk->GetSize();
      void* begin = AddrFromChunk(chunk);
      void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
      callback(begin, end, size, arg);
      callback(NULL, NULL, 0, arg);
    }
    chunk = GetNextChunk(chunk);
  }
}

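// Release the chunk backing obj: poison it in debug builds, let the kernel
// reclaim the pages with madvise, and coalesce with any adjacent free chunks
// to reduce fragmentation.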
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  CHECK(Contains(obj));
  // Check adjacent chunks to see if we need to combine.
  Chunk* chunk = ChunkFromAddr(obj);
  CHECK(!chunk->IsFree());

  size_t allocation_size = chunk->GetSize();
  if (kIsDebugBuild) {
    memset(obj, 0xEB, allocation_size);
  }
  madvise(obj, allocation_size, MADV_DONTNEED);
  num_objects_allocated_--;
  num_bytes_allocated_ -= allocation_size;
  Chunk* prev = chunk->GetPrevious();
  Chunk* next = GetNextChunk(chunk);

  // Combine any adjacent free chunks.
  size_t extra_size = chunk->GetSize();
  if (next->IsFree()) {
    extra_size += next->GetSize();
    RemoveFreeChunk(next);
  }
  if (prev != NULL && prev->IsFree()) {
    RemoveFreeChunk(prev);
    AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
  } else {
    AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const mirror::Object* obj) const {
  return mem_map_->HasAddress(obj);
}

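// Chunk sizes are multiples of kAlignment, so the next header is found by
// plain pointer arithmetic. For example, assuming kAlignment were 4 KiB, a
// 12 KiB chunk at chunks_[0] would have its successor at chunks_[3].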
FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
  return chunk + chunk->GetSize() / kAlignment;
}

size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
  Chunk* chunk = ChunkFromAddr(const_cast<mirror::Object*>(obj));
  CHECK(!chunk->IsFree());
  return chunk->GetSize();
}

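// Best-fit allocation: lower_bound on the size-ordered free set finds the
// smallest free chunk that fits, and any excess beyond the rounded-up
// request is split off and returned to the free set.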
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
  MutexLock mu(self, lock_);
  num_bytes = RoundUp(num_bytes, kAlignment);
  Chunk temp;
  temp.SetSize(num_bytes);
  // Find the smallest chunk at least num_bytes in size.
  FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
  if (found == free_chunks_.end()) {
    // Out of memory, or too much fragmentation.
    return NULL;
  }
  Chunk* chunk = *found;
  free_chunks_.erase(found);
  CHECK(chunk->IsFree());
  void* addr = AddrFromChunk(chunk);
  size_t chunk_size = chunk->GetSize();
  chunk->SetSize(num_bytes);
  if (chunk_size > num_bytes) {
    // Split the chunk into two chunks.
    Chunk* new_chunk = GetNextChunk(chunk);
    AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
  }

  num_objects_allocated_++;
  total_objects_allocated_++;
  num_bytes_allocated_ += num_bytes;
  total_bytes_allocated_ += num_bytes;
  return reinterpret_cast<mirror::Object*>(addr);
}

void FreeListSpace::Dump(std::ostream& os) const {
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End());
}

}  // namespace space
}  // namespace gc
}  // namespace art