/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "MemoryDealer"

#include <utils/MemoryDealer.h>

#include <utils/Log.h>
#include <utils/IPCThreadState.h>
#include <utils/SortedVector.h>
#include <utils/String8.h>
#include <utils/MemoryBase.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/file.h>

namespace android {


// ----------------------------------------------------------------------------

class SimpleMemory : public MemoryBase {
public:
    SimpleMemory(const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~SimpleMemory();
};


// ----------------------------------------------------------------------------

MemoryDealer::Allocation::Allocation(
        const sp<MemoryDealer>& dealer, ssize_t offset, size_t size,
        const sp<IMemory>& memory)
    : mDealer(dealer), mOffset(offset), mSize(size), mMemory(memory)
{
}

MemoryDealer::Allocation::~Allocation()
{
    if (mSize) {
        /* NOTE: it's VERY important to not free allocations of size 0 because
         * they're special as they don't have any record in the allocator
         * and could alias some real allocation (their offset is zero). */
        mDealer->deallocate(mOffset);
    }
}
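
/* Illustration of the hazard noted above: allocate(0) hands back offset 0
 * without creating any chunk in the allocator, so calling deallocate(0) for
 * it would release whatever real allocation happens to start at offset 0. */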

sp<IMemoryHeap> MemoryDealer::Allocation::getMemory(
        ssize_t* offset, size_t* size) const
{
    return mMemory->getMemory(offset, size);
}

// ----------------------------------------------------------------------------

MemoryDealer::MemoryDealer(size_t size, uint32_t flags, const char* name)
    : mHeap(new SharedHeap(size, flags, name)),
      mAllocator(new SimpleBestFitAllocator(size))
{
}

MemoryDealer::MemoryDealer(const sp<HeapInterface>& heap)
    : mHeap(heap),
      mAllocator(new SimpleBestFitAllocator(heap->virtualSize()))
{
}

MemoryDealer::MemoryDealer(const sp<HeapInterface>& heap,
        const sp<AllocatorInterface>& allocator)
    : mHeap(heap), mAllocator(allocator)
{
}

MemoryDealer::~MemoryDealer()
{
}

sp<IMemory> MemoryDealer::allocate(size_t size, uint32_t flags)
{
    sp<IMemory> memory;
    const ssize_t offset = allocator()->allocate(size, flags);
    if (offset >= 0) {
        sp<IMemory> new_memory = heap()->mapMemory(offset, size);
        if (new_memory != 0) {
            memory = new Allocation(this, offset, size, new_memory);
        } else {
            LOGE("couldn't map [%8x, %d]", offset, size);
            if (size) {
                /* NOTE: it's VERY important to not free allocations of size 0
                 * because they're special as they don't have any record in the
                 * allocator and could alias some real allocation
                 * (their offset is zero). */
                allocator()->deallocate(offset);
            }
        }
    }
    return memory;
}
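
/*
 * Typical client-side usage, as an illustrative sketch (the heap size, the
 * "example-heap" name and the IMemory::pointer() accessor below are examples,
 * not mandated by this file):
 *
 *     sp<MemoryDealer> dealer = new MemoryDealer(1024*1024, 0, "example-heap");
 *     sp<IMemory> buffer = dealer->allocate(4096, 0);
 *     if (buffer != 0) {
 *         void* data = buffer->pointer();  // CPU address of the carved-out slice
 *         // ... fill the 4096-byte region, hand the IMemory across Binder ...
 *         buffer.clear();  // ~Allocation() returns the slice to the dealer
 *     }
 */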

void MemoryDealer::deallocate(size_t offset)
{
    allocator()->deallocate(offset);
}

void MemoryDealer::dump(const char* what, uint32_t flags) const
{
    allocator()->dump(what, flags);
}

const sp<HeapInterface>& MemoryDealer::heap() const {
    return mHeap;
}

const sp<AllocatorInterface>& MemoryDealer::allocator() const {
    return mAllocator;
}

// ----------------------------------------------------------------------------

// align all the memory blocks on a cache-line boundary
const int SimpleBestFitAllocator::kMemoryAlign = 32;
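
// All bookkeeping below is done in units of kMemoryAlign bytes: chunk_t::start
// and chunk_t::size count 32-byte units, not bytes. For example, a 100-byte
// request rounds up to (100 + 31) / 32 = 4 units, i.e. 128 bytes.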

SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
{
    size_t pagesize = getpagesize();
    mHeapSize = ((size + pagesize-1) & ~(pagesize-1));

    chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
    mList.insertHead(node);
}
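
/* For instance (assuming a 4096-byte page): constructing the allocator with
 * size = 10000 rounds mHeapSize up to 12288 bytes, which becomes a single
 * free chunk of 12288 / 32 = 384 units starting at unit 0. */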

SimpleBestFitAllocator::~SimpleBestFitAllocator()
{
    while (!mList.isEmpty()) {
        delete mList.remove(mList.head());
    }
}

size_t SimpleBestFitAllocator::size() const
{
    return mHeapSize;
}

size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
{
    Mutex::Autolock _l(mLock);
    ssize_t offset = alloc(size, flags);
    return offset;
}

status_t SimpleBestFitAllocator::deallocate(size_t offset)
{
    Mutex::Autolock _l(mLock);
    chunk_t const * const freed = dealloc(offset);
    if (freed) {
        return NO_ERROR;
    }
    return NAME_NOT_FOUND;
}

ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
{
    if (size == 0) {
        return 0;
    }
    size = (size + kMemoryAlign-1) / kMemoryAlign;
    chunk_t* free_chunk = 0;
    chunk_t* cur = mList.head();

    size_t pagesize = getpagesize();
    while (cur) {
        int extra = 0;
        if (flags & PAGE_ALIGNED)
            extra = (-cur->start & ((pagesize/kMemoryAlign)-1));

        // best fit
        if (cur->free && (cur->size >= (size+extra))) {
            if ((!free_chunk) || (cur->size < free_chunk->size)) {
                free_chunk = cur;
            }
            if (cur->size == size) {
                break;
            }
        }
        cur = cur->next;
    }

    if (free_chunk) {
        const size_t free_size = free_chunk->size;
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
            int extra = 0;
            if (flags & PAGE_ALIGNED)
                extra = (-free_chunk->start & ((pagesize/kMemoryAlign)-1));
            if (extra) {
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insertBefore(free_chunk, split);
            }

            LOGE_IF((flags&PAGE_ALIGNED) &&
                    ((free_chunk->start*kMemoryAlign)&(pagesize-1)),
                    "PAGE_ALIGNED requested, but page is not aligned!!!");

            const ssize_t tail_free = free_size - (size+extra);
            if (tail_free > 0) {
                chunk_t* split = new chunk_t(
                        free_chunk->start + free_chunk->size, tail_free);
                mList.insertAfter(free_chunk, split);
            }
        }
        return (free_chunk->start)*kMemoryAlign;
    }
    return NO_MEMORY;
}
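
/*
 * Worked example of the splitting above (illustrative numbers, 4096-byte
 * pages): given a free chunk at unit 1 holding 1000 units, a PAGE_ALIGNED
 * request for 4096 bytes (128 units) computes extra = (-1) & 127 = 127.
 * A 127-unit free chunk is split off in front, the allocation occupies
 * units 128..255 (byte offset 4096, page aligned), and the remaining
 * 1000 - (128 + 127) = 745 units are re-inserted as a trailing free chunk.
 */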

SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
{
    start = start / kMemoryAlign;
    chunk_t* cur = mList.head();
    while (cur) {
        if (cur->start == start) {
            LOG_FATAL_IF(cur->free,
                "block at offset 0x%08lX of size 0x%08lX already freed",
                cur->start*kMemoryAlign, cur->size*kMemoryAlign);

            // merge freed blocks together
            chunk_t* freed = cur;
            cur->free = 1;
            do {
                chunk_t* const p = cur->prev;
                chunk_t* const n = cur->next;
                if (p && (p->free || !cur->size)) {
                    freed = p;
                    p->size += cur->size;
                    mList.remove(cur);
                    delete cur;
                }
                cur = n;
            } while (cur && cur->free);

            #ifndef NDEBUG
                if (!freed->free) {
                    dump_l("dealloc (!freed->free)");
                }
            #endif
            LOG_FATAL_IF(!freed->free,
                "freed block at offset 0x%08lX of size 0x%08lX is not free!",
                freed->start * kMemoryAlign, freed->size * kMemoryAlign);

            return freed;
        }
        cur = cur->next;
    }
    return 0;
}
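
/*
 * Continuing the example above: dealloc(4096) locates the chunk at unit 128,
 * marks it free, and coalesces it with its free neighbours (the 127-unit
 * chunk in front and the 745-unit chunk behind), leaving a single free chunk
 * of 127 + 128 + 745 = 1000 units again.
 */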

void SimpleBestFitAllocator::dump(const char* what, uint32_t flags) const
{
    Mutex::Autolock _l(mLock);
    dump_l(what, flags);
}

void SimpleBestFitAllocator::dump_l(const char* what, uint32_t flags) const
{
    String8 result;
    dump_l(result, what, flags);
    LOGD("%s", result.string());
}

void SimpleBestFitAllocator::dump(String8& result,
        const char* what, uint32_t flags) const
{
    Mutex::Autolock _l(mLock);
    dump_l(result, what, flags);
}

void SimpleBestFitAllocator::dump_l(String8& result,
        const char* what, uint32_t flags) const
{
    size_t size = 0;
    int32_t i = 0;
    chunk_t const* cur = mList.head();

    const size_t SIZE = 256;
    char buffer[SIZE];
    snprintf(buffer, SIZE, " %s (%p, size=%u)\n",
            what, this, (unsigned int)mHeapSize);

    result.append(buffer);

    while (cur) {
        const char* errs[] = {"", "| link bogus NP",
                "| link bogus PN", "| link bogus NP+PN" };
        int np = ((cur->next) && cur->next->prev != cur) ? 1 : 0;
        int pn = ((cur->prev) && cur->prev->next != cur) ? 2 : 0;

        snprintf(buffer, SIZE, " %3u: %08x | 0x%08X | 0x%08X | %s %s\n",
                i, int(cur), int(cur->start*kMemoryAlign),
                int(cur->size*kMemoryAlign),
                int(cur->free) ? "F" : "A",
                errs[np|pn]);

        result.append(buffer);

        if (!cur->free)
            size += cur->size*kMemoryAlign;

        i++;
        cur = cur->next;
    }
    snprintf(buffer, SIZE, " size allocated: %u (%u KB)\n", int(size), int(size/1024));
    result.append(buffer);
}
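
// Each chunk line printed above shows: index, chunk_t address, byte offset
// (start * kMemoryAlign), byte size, an F(ree)/A(llocated) flag, and a note
// if the prev/next links of the list are inconsistent.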

// ----------------------------------------------------------------------------


SharedHeap::SharedHeap(size_t size, uint32_t flags, char const * name)
    : MemoryHeapBase(size, flags, name)
{
}

SharedHeap::~SharedHeap()
{
}

sp<IMemory> SharedHeap::mapMemory(size_t offset, size_t size)
{
    return new SimpleMemory(this, offset, size);
}


SimpleMemory::SimpleMemory(const sp<IMemoryHeap>& heap,
        ssize_t offset, size_t size)
    : MemoryBase(heap, offset, size)
{
#ifndef NDEBUG
    void* const start_ptr = (void*)(intptr_t(heap->base()) + offset);
    memset(start_ptr, 0xda, size);
#endif
}
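
// Debug builds fill a freshly mapped slice with 0xda above and a released
// slice with 0xdf in the destructor below, so use of uninitialized or freed
// dealer memory stands out in a memory dump.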

SimpleMemory::~SimpleMemory()
{
    size_t freedOffset = getOffset();
    size_t freedSize = getSize();

    // keep the size to unmap in excess
    size_t pagesize = getpagesize();
    size_t start = freedOffset;
    size_t end = start + freedSize;
    start &= ~(pagesize-1);
    end = (end + pagesize-1) & ~(pagesize-1);

    // give back to the kernel the pages we don't need
    size_t free_start = freedOffset;
    size_t free_end = free_start + freedSize;
    if (start < free_start)
        start = free_start;
    if (end > free_end)
        end = free_end;
    start = (start + pagesize-1) & ~(pagesize-1);
    end &= ~(pagesize-1);

    void* const start_ptr = (void*)(intptr_t(getHeap()->base()) + start);
    size_t size = end-start;

#ifndef NDEBUG
    memset(start_ptr, 0xdf, size);
#endif

    // MADV_REMOVE is not defined on Dapper based Goobuntu
#ifdef MADV_REMOVE
    if (size) {
        int err = madvise(start_ptr, size, MADV_REMOVE);
        LOGW_IF(err, "madvise(%p, %u, MADV_REMOVE) returned %s",
                start_ptr, size, err<0 ? strerror(errno) : "Ok");
    }
#endif
}
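
/*
 * Example of the page arithmetic above (4096-byte pages): freeing an
 * allocation at offset 0x1230 with size 0x3000 covers bytes [0x1230, 0x4230).
 * Rounding the start up and the end down to page boundaries leaves
 * [0x2000, 0x4000), so only the two pages wholly contained in the allocation
 * are purged with MADV_REMOVE; the partial pages at either end stay mapped
 * because neighbouring allocations may still be using them.
 */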

}; // namespace android