blob: 6f3c7c1074d37255b53f6ddd4bc626e65f804872 [file] [log] [blame]
The Android Open Source Projectf6c38712009-03-03 19:28:47 -08001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16/*
17 * Garbage-collecting memory allocator.
18 */
19#include "Dalvik.h"
20#include "alloc/HeapTable.h"
21#include "alloc/Heap.h"
22#include "alloc/HeapInternal.h"
23#include "alloc/DdmHeap.h"
24#include "alloc/HeapSource.h"
25#include "alloc/MarkSweep.h"
26
27#include "utils/threads.h" // need Android thread priorities
28#define kInvalidPriority 10000
29
30#include <sys/time.h>
31#include <sys/resource.h>
32#include <limits.h>
33#include <errno.h>
34
35#define kNonCollectableRefDefault 16
36#define kFinalizableRefDefault 128
37
38/*
39 * Initialize the GC heap.
40 *
41 * Returns true if successful, false otherwise.
42 */
43bool dvmHeapStartup()
44{
45 GcHeap *gcHeap;
46
47#if defined(WITH_ALLOC_LIMITS)
48 gDvm.checkAllocLimits = false;
49 gDvm.allocationLimit = -1;
50#endif
51
52 gcHeap = dvmHeapSourceStartup(gDvm.heapSizeStart, gDvm.heapSizeMax);
53 if (gcHeap == NULL) {
54 return false;
55 }
56 gcHeap->heapWorkerCurrentObject = NULL;
57 gcHeap->heapWorkerCurrentMethod = NULL;
58 gcHeap->heapWorkerInterpStartTime = 0LL;
59 gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
60 gcHeap->softReferenceHeapSizeThreshold = gDvm.heapSizeStart;
61 gcHeap->ddmHpifWhen = 0;
62 gcHeap->ddmHpsgWhen = 0;
63 gcHeap->ddmHpsgWhat = 0;
64 gcHeap->ddmNhsgWhen = 0;
65 gcHeap->ddmNhsgWhat = 0;
66#if WITH_HPROF
67 gcHeap->hprofDumpOnGc = false;
68 gcHeap->hprofContext = NULL;
69#endif
70
71 /* This needs to be set before we call dvmHeapInitHeapRefTable().
72 */
73 gDvm.gcHeap = gcHeap;
74
75 /* Set up the table we'll use for ALLOC_NO_GC.
76 */
77 if (!dvmHeapInitHeapRefTable(&gcHeap->nonCollectableRefs,
78 kNonCollectableRefDefault))
79 {
80 LOGE_HEAP("Can't allocate GC_NO_ALLOC table\n");
81 goto fail;
82 }
83
84 /* Set up the lists and lock we'll use for finalizable
85 * and reference objects.
86 */
87 dvmInitMutex(&gDvm.heapWorkerListLock);
88 gcHeap->finalizableRefs = NULL;
89 gcHeap->pendingFinalizationRefs = NULL;
90 gcHeap->referenceOperations = NULL;
91
92 /* Initialize the HeapWorker locks and other state
93 * that the GC uses.
94 */
95 dvmInitializeHeapWorkerState();
96
97 return true;
98
99fail:
100 gDvm.gcHeap = NULL;
101 dvmHeapSourceShutdown(gcHeap);
102 return false;
103}
104
105bool dvmHeapStartupAfterZygote()
106{
107 /* Update our idea of the last GC start time so that we
108 * don't use the last time that Zygote happened to GC.
109 */
110 gDvm.gcHeap->gcStartTime = dvmGetRelativeTimeUsec();
111
112 return dvmHeapSourceStartupAfterZygote();
113}
114
/* Tear down the GC heap: free the native-heap side tables, then
 * destroy the heap source itself.  Safe to call when no heap exists.
 */
void dvmHeapShutdown()
{
//TODO: make sure we're locked
    if (gDvm.gcHeap != NULL) {
        GcHeap *gcHeap;

        /* Detach the global pointer first so nothing else can reach
         * the heap while we dismantle it.
         */
        gcHeap = gDvm.gcHeap;
        gDvm.gcHeap = NULL;

        /* Tables are allocated on the native heap;
         * they need to be cleaned up explicitly.
         * The process may stick around, so we don't
         * want to leak any native memory.
         */
        dvmHeapFreeHeapRefTable(&gcHeap->nonCollectableRefs);

        dvmHeapFreeLargeTable(gcHeap->finalizableRefs);
        gcHeap->finalizableRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->pendingFinalizationRefs);
        gcHeap->pendingFinalizationRefs = NULL;

        dvmHeapFreeLargeTable(gcHeap->referenceOperations);
        gcHeap->referenceOperations = NULL;

        /* Destroy the heap.  Any outstanding pointers
         * will point to unmapped memory (unless/until
         * someone else maps it).  This frees gcHeap
         * as a side-effect.
         */
        dvmHeapSourceShutdown(gcHeap);
    }
}
148
149/*
150 * We've been asked to allocate something we can't, e.g. an array so
151 * large that (length * elementWidth) is larger than 2^31. We want to
152 * throw an OutOfMemoryError, but doing so implies that certain other
153 * actions have taken place (like clearing soft references).
154 *
155 * TODO: for now we just throw an InternalError.
156 */
void dvmThrowBadAllocException(const char* msg)
{
    /* Throws InternalError rather than OutOfMemoryError for now:
     * per the comment above, throwing OOME implies side-effects
     * (e.g. clearing soft references) that haven't happened here.
     */
    dvmThrowException("Ljava/lang/InternalError;", msg);
}
161
162/*
163 * Grab the lock, but put ourselves into THREAD_VMWAIT if it looks like
164 * we're going to have to wait on the mutex.
165 */
bool dvmLockHeap()
{
    /* Fast path: try to take the lock without blocking.  Only if that
     * fails do we pay for the thread-status transition below.
     */
    if (pthread_mutex_trylock(&gDvm.gcHeapLock) != 0) {
        Thread *self;
        ThreadStatus oldStatus;
        int cc;

        /* Flip to VMWAIT while blocked so the GC/suspend machinery
         * doesn't treat us as a running thread holding things up.
         */
        self = dvmThreadSelf();
        if (self != NULL) {
            oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        } else {
            /* Can happen during thread attach/detach, before TLS is set. */
            LOGI("ODD: waiting on heap lock, no self\n");
            oldStatus = -1;     // shut up gcc
        }

        /* NOTE(review): `cc` is only consumed by assert(); with NDEBUG
         * this produces an unused-variable warning.
         */
        cc = pthread_mutex_lock(&gDvm.gcHeapLock);
        assert(cc == 0);

        /* Restore whatever status we had before we started waiting. */
        if (self != NULL) {
            dvmChangeStatus(self, oldStatus);
        }
    }

    return true;
}
191
/* Release the global heap lock taken by dvmLockHeap(). */
void dvmUnlockHeap()
{
    dvmUnlockMutex(&gDvm.gcHeapLock);
}
196
197/* Pop an object from the list of pending finalizations and
198 * reference clears/enqueues, and return the object.
199 * The caller must call dvmReleaseTrackedAlloc()
200 * on the object when finished.
201 *
202 * Typically only called by the heap worker thread.
203 */
Object *dvmGetNextHeapWorkerObject(HeapWorkerOperation *op)
{
    Object *obj;
    LargeHeapRefTable *table;
    GcHeap *gcHeap = gDvm.gcHeap;

    assert(op != NULL);

    obj = NULL;

    dvmLockMutex(&gDvm.heapWorkerListLock);

    /* We must handle reference operations before finalizations.
     * If:
     *   a) Someone subclasses WeakReference and overrides clear()
     *   b) A reference of this type is the last reference to
     *      a finalizable object
     * then we need to guarantee that the overridden clear() is called
     * on the reference before finalize() is called on the referent.
     * Both of these operations will always be scheduled at the same
     * time, so handling reference operations first will guarantee
     * the required order.
     */
    obj = dvmHeapGetNextObjectFromLargeTable(&gcHeap->referenceOperations);
    if (obj != NULL) {
        uintptr_t workBits;

        /* The pending operation is encoded in the low bits of the
         * stored pointer; extract it, then strip the bits to recover
         * the real object address.
         */
        workBits = (uintptr_t)obj & (WORKER_CLEAR | WORKER_ENQUEUE);
        assert(workBits != 0);
        obj = (Object *)((uintptr_t)obj & ~(WORKER_CLEAR | WORKER_ENQUEUE));

        *op = workBits;
    } else {
        /* No reference operations pending; fall back to finalizations. */
        obj = dvmHeapGetNextObjectFromLargeTable(
                &gcHeap->pendingFinalizationRefs);
        if (obj != NULL) {
            *op = WORKER_FINALIZE;
        }
    }

    if (obj != NULL) {
        /* Don't let the GC collect the object until the
         * worker thread is done with it.
         *
         * This call is safe; it uses thread-local storage
         * and doesn't acquire any locks.
         */
        dvmAddTrackedAlloc(obj, NULL);
    }

    dvmUnlockMutex(&gDvm.heapWorkerListLock);

    return obj;
}
258
259/* Used for a heap size change hysteresis to avoid collecting
260 * SoftReferences when the heap only grows by a small amount.
261 */
262#define SOFT_REFERENCE_GROWTH_SLACK (128 * 1024)
263
264/* Whenever the effective heap size may have changed,
265 * this function must be called.
266 */
267void dvmHeapSizeChanged()
268{
269 GcHeap *gcHeap = gDvm.gcHeap;
270 size_t currentHeapSize;
271
272 currentHeapSize = dvmHeapSourceGetIdealFootprint();
273
274 /* See if the heap size has changed enough that we should care
275 * about it.
276 */
277 if (currentHeapSize <= gcHeap->softReferenceHeapSizeThreshold -
278 4 * SOFT_REFERENCE_GROWTH_SLACK)
279 {
280 /* The heap has shrunk enough that we'll use this as a new
281 * threshold. Since we're doing better on space, there's
282 * no need to collect any SoftReferences.
283 *
284 * This is 4x the growth hysteresis because we don't want
285 * to snap down so easily after a shrink. If we just cleared
286 * up a bunch of SoftReferences, we don't want to disallow
287 * any new ones from being created.
288 * TODO: determine if the 4x is important, needed, or even good
289 */
290 gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
291 gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
292 } else if (currentHeapSize >= gcHeap->softReferenceHeapSizeThreshold +
293 SOFT_REFERENCE_GROWTH_SLACK)
294 {
295 /* The heap has grown enough to warrant collecting SoftReferences.
296 */
297 gcHeap->softReferenceHeapSizeThreshold = currentHeapSize;
298 gcHeap->softReferenceCollectionState = SR_COLLECT_SOME;
299 }
300}
301
302
303/* Do a full garbage collection, which may grow the
304 * heap as a side-effect if the live set is large.
305 */
/* Run a full collection on behalf of a failed allocation attempt,
 * bumping the allocation-profiler GC counters first if enabled.
 */
static void gcForMalloc(bool collectSoftReferences)
{
#ifdef WITH_PROFILER
    if (gDvm.allocProf.enabled) {
        Thread* self = dvmThreadSelf();
        gDvm.allocProf.gcCount++;
        if (self != NULL) {
            self->allocProf.gcCount++;
        }
    }
#endif
    /* This may adjust the soft limit as a side-effect.
     */
    LOGD_HEAP("dvmMalloc initiating GC%s\n",
            collectSoftReferences ? "(collect SoftReferences)" : "");
    dvmCollectGarbageInternal(collectSoftReferences);
}
323
324/* Try as hard as possible to allocate some memory.
325 */
/* Try as hard as possible to allocate some memory.
 *
 * Escalation ladder: plain alloc -> GC (no soft refs) + alloc ->
 * alloc-and-grow -> GC (collect soft refs) + alloc-and-grow.
 * Returns NULL only after all of those fail; the caller is then
 * expected to throw OutOfMemoryError.
 */
static DvmHeapChunk *tryMalloc(size_t size)
{
    DvmHeapChunk *hc;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapSizeMax) {
        LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
                "someone's allocating a huge buffer\n", size, size);
        hc = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    /* Every request carries a DvmHeapChunk header in front of the data. */
    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* The allocation failed.  Free up some space by doing
     * a full garbage collection.  This may grow the heap
     * if the live set is sufficiently large.
     */
    gcForMalloc(false);
    hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
    if (hc != NULL) {
        return hc;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation\n",
                FRACTIONAL_MB(newHeapSize), size);
        return hc;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
            size);
    gcForMalloc(true);
    hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
    dvmHeapSizeChanged();
    if (hc != NULL) {
        return hc;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.\n", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}
408
409/* Throw an OutOfMemoryError if there's a thread to attach it to.
410 * Avoid recursing.
411 *
412 * The caller must not be holding the heap lock, or else the allocations
413 * in dvmThrowException() will deadlock.
414 */
/* Throw an OutOfMemoryError on the current thread, guarding against
 * the recursive case where building the exception itself runs out of
 * memory.  Silently does nothing if there is no current Thread.
 */
static void throwOOME()
{
    Thread *self;

    if ((self = dvmThreadSelf()) != NULL) {
        /* If the current (failing) dvmMalloc() happened as part of thread
         * creation/attachment before the thread became part of the root set,
         * we can't rely on the thread-local trackedAlloc table, so
         * we can't keep track of a real allocated OOME object.  But, since
         * the thread is in the process of being created, it won't have
         * a useful stack anyway, so we may as well make things easier
         * by throwing the (stackless) pre-built OOME.
         */
        if (dvmIsOnThreadList(self) && !self->throwingOOME) {
            /* Let ourselves know that we tried to throw an OOM
             * error in the normal way in case we run out of
             * memory trying to allocate it inside dvmThrowException().
             */
            self->throwingOOME = true;

            /* Don't include a description string;
             * one fewer allocation.
             */
            dvmThrowException("Ljava/lang/OutOfMemoryError;", NULL);
        } else {
            /*
             * This thread has already tried to throw an OutOfMemoryError,
             * which probably means that we're running out of memory
             * while recursively trying to throw.
             *
             * To avoid any more allocation attempts, "throw" a pre-built
             * OutOfMemoryError object (which won't have a useful stack trace).
             *
             * Note that since this call can't possibly allocate anything,
             * we don't care about the state of self->throwingOOME
             * (which will usually already be set).
             */
            dvmSetException(self, gDvm.outOfMemoryObj);
        }
        /* We're done with the possible recursion.
         */
        self->throwingOOME = false;
    }
}
459
460/*
461 * Allocate storage on the GC heap. We guarantee 8-byte alignment.
462 *
463 * The new storage is zeroed out.
464 *
465 * Note that, in rare cases, this could get called while a GC is in
466 * progress. If a non-VM thread tries to attach itself through JNI,
467 * it will need to allocate some objects. If this becomes annoying to
468 * deal with, we can block it at the source, but holding the allocation
469 * mutex should be enough.
470 *
471 * In rare circumstances (JNI AttachCurrentThread) we can be called
472 * from a non-VM thread.
473 *
474 * We implement ALLOC_NO_GC by maintaining an internal list of objects
475 * that should not be collected. This requires no actual flag storage in
476 * the object itself, which is good, but makes flag queries expensive.
477 *
478 * Use ALLOC_DONT_TRACK when we either don't want to track an allocation
479 * (because it's being done for the interpreter "new" operation and will
480 * be part of the root set immediately) or we can't (because this allocation
481 * is for a brand new thread).
482 *
483 * Returns NULL and throws an exception on failure.
484 *
485 * TODO: don't do a GC if the debugger thinks all threads are suspended
486 */
487void* dvmMalloc(size_t size, int flags)
488{
489 GcHeap *gcHeap = gDvm.gcHeap;
490 DvmHeapChunk *hc;
491 void *ptr;
492 bool triedGc, triedGrowing;
493
494#if 0
495 /* handy for spotting large allocations */
496 if (size >= 100000) {
497 LOGI("dvmMalloc(%d):\n", size);
498 dvmDumpThread(dvmThreadSelf(), false);
499 }
500#endif
501
502#if defined(WITH_ALLOC_LIMITS)
503 /*
504 * See if they've exceeded the allocation limit for this thread.
505 *
506 * A limit value of -1 means "no limit".
507 *
508 * This is enabled at compile time because it requires us to do a
509 * TLS lookup for the Thread pointer. This has enough of a performance
510 * impact that we don't want to do it if we don't have to. (Now that
511 * we're using gDvm.checkAllocLimits we may want to reconsider this,
512 * but it's probably still best to just compile the check out of
513 * production code -- one less thing to hit on every allocation.)
514 */
515 if (gDvm.checkAllocLimits) {
516 Thread* self = dvmThreadSelf();
517 if (self != NULL) {
518 int count = self->allocLimit;
519 if (count > 0) {
520 self->allocLimit--;
521 } else if (count == 0) {
522 /* fail! */
523 assert(!gDvm.initializing);
524 self->allocLimit = -1;
525 dvmThrowException("Ldalvik/system/AllocationLimitError;",
526 "thread allocation limit exceeded");
527 return NULL;
528 }
529 }
530 }
531
532 if (gDvm.allocationLimit >= 0) {
533 assert(!gDvm.initializing);
534 gDvm.allocationLimit = -1;
535 dvmThrowException("Ldalvik/system/AllocationLimitError;",
536 "global allocation limit exceeded");
537 return NULL;
538 }
539#endif
540
541 dvmLockHeap();
542
543 /* Try as hard as possible to allocate some memory.
544 */
545 hc = tryMalloc(size);
546 if (hc != NULL) {
547alloc_succeeded:
548 /* We've got the memory.
549 */
550 if ((flags & ALLOC_FINALIZABLE) != 0) {
551 /* This object is an instance of a class that
552 * overrides finalize(). Add it to the finalizable list.
553 *
554 * Note that until DVM_OBJECT_INIT() is called on this
555 * object, its clazz will be NULL. Since the object is
556 * in this table, it will be scanned as part of the root
557 * set. scanObject() explicitly deals with the NULL clazz.
558 */
559 if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
560 (Object *)hc->data))
561 {
562 LOGE_HEAP("dvmMalloc(): no room for any more "
563 "finalizable objects\n");
564 dvmAbort();
565 }
566 }
567
568#if WITH_OBJECT_HEADERS
569 hc->header = OBJECT_HEADER;
570 hc->birthGeneration = gGeneration;
571#endif
572 ptr = hc->data;
573
574 /* The caller may not want us to collect this object.
575 * If not, throw it in the nonCollectableRefs table, which
576 * will be added to the root set when we GC.
577 *
578 * Note that until DVM_OBJECT_INIT() is called on this
579 * object, its clazz will be NULL. Since the object is
580 * in this table, it will be scanned as part of the root
581 * set. scanObject() explicitly deals with the NULL clazz.
582 */
583 if ((flags & ALLOC_NO_GC) != 0) {
584 if (!dvmHeapAddToHeapRefTable(&gcHeap->nonCollectableRefs, ptr)) {
585 LOGE_HEAP("dvmMalloc(): no room for any more "
586 "ALLOC_NO_GC objects: %zd\n",
587 dvmHeapNumHeapRefTableEntries(
588 &gcHeap->nonCollectableRefs));
589 dvmAbort();
590 }
591 }
592
593#ifdef WITH_PROFILER
594 if (gDvm.allocProf.enabled) {
595 Thread* self = dvmThreadSelf();
596 gDvm.allocProf.allocCount++;
597 gDvm.allocProf.allocSize += size;
598 if (self != NULL) {
599 self->allocProf.allocCount++;
600 self->allocProf.allocSize += size;
601 }
602 }
603#endif
604 } else {
605 /* The allocation failed.
606 */
607 ptr = NULL;
608
609#ifdef WITH_PROFILER
610 if (gDvm.allocProf.enabled) {
611 Thread* self = dvmThreadSelf();
612 gDvm.allocProf.failedAllocCount++;
613 gDvm.allocProf.failedAllocSize += size;
614 if (self != NULL) {
615 self->allocProf.failedAllocCount++;
616 self->allocProf.failedAllocSize += size;
617 }
618 }
619#endif
620 }
621
622 dvmUnlockHeap();
623
624 if (ptr != NULL) {
625 /*
626 * If this block is immediately GCable, and they haven't asked us not
627 * to track it, add it to the internal tracking list.
628 *
629 * If there's no "self" yet, we can't track it. Calls made before
630 * the Thread exists should use ALLOC_NO_GC.
631 */
632 if ((flags & (ALLOC_DONT_TRACK | ALLOC_NO_GC)) == 0) {
633 dvmAddTrackedAlloc(ptr, NULL);
634 }
635 } else {
636 /*
637 * The allocation failed; throw an OutOfMemoryError.
638 */
639 throwOOME();
640 }
641
642 return ptr;
643}
644
645/*
646 * Returns true iff <obj> points to a valid allocated object.
647 */
bool dvmIsValidObject(const Object* obj)
{
    const DvmHeapChunk *hc;

    /* Don't bother if it's NULL or not 8-byte aligned.
     *
     * NOTE(review): ptr2chunk() is applied before the NULL check;
     * presumably it is pure pointer arithmetic with no dereference,
     * so this is harmless — confirm against its definition.
     */
    hc = ptr2chunk(obj);
    if (obj != NULL && ((uintptr_t)hc & (8-1)) == 0) {
        /* Even if the heap isn't locked, this shouldn't return
         * any false negatives.  The only mutation that could
         * be happening is allocation, which means that another
         * thread could be in the middle of a read-modify-write
         * to add a new bit for a new object.  However, that
         * RMW will have completed by the time any other thread
         * could possibly see the new pointer, so there is no
         * danger of dvmIsValidObject() being called on a valid
         * pointer whose bit isn't set.
         *
         * Freeing will only happen during the sweep phase, which
         * only happens while the heap is locked.
         */
        return dvmHeapSourceContains(hc);
    }
    return false;
}
673
674/*
675 * Clear flags that were passed into dvmMalloc() et al.
676 * e.g., ALLOC_NO_GC, ALLOC_DONT_TRACK.
677 */
678void dvmClearAllocFlags(Object *obj, int mask)
679{
680 if ((mask & ALLOC_NO_GC) != 0) {
681 dvmLockHeap();
682 if (dvmIsValidObject(obj)) {
683 if (!dvmHeapRemoveFromHeapRefTable(&gDvm.gcHeap->nonCollectableRefs,
684 obj))
685 {
686 LOGE_HEAP("dvmMalloc(): failed to remove ALLOC_NO_GC bit from "
687 "object 0x%08x\n", (uintptr_t)obj);
688 dvmAbort();
689 }
690//TODO: shrink if the table is very empty
691 }
692 dvmUnlockHeap();
693 }
694
695 if ((mask & ALLOC_DONT_TRACK) != 0) {
696 dvmReleaseTrackedAlloc(obj, NULL);
697 }
698}
699
/* Return the usable size of <obj>'s allocation, excluding the
 * DvmHeapChunk header that precedes the object data.
 */
size_t dvmObjectSizeInHeap(const Object *obj)
{
    return dvmHeapSourceChunkSize(ptr2chunk(obj)) - sizeof(DvmHeapChunk);
}
704
705/*
706 * Initiate garbage collection.
707 *
708 * NOTES:
709 * - If we don't hold gDvm.threadListLock, it's possible for a thread to
710 * be added to the thread list while we work. The thread should NOT
711 * start executing, so this is only interesting when we start chasing
712 * thread stacks. (Before we do so, grab the lock.)
713 *
714 * We are not allowed to GC when the debugger has suspended the VM, which
715 * is awkward because debugger requests can cause allocations. The easiest
716 * way to enforce this is to refuse to GC on an allocation made by the
717 * JDWP thread -- we have to expand the heap or fail.
718 */
void dvmCollectGarbageInternal(bool collectSoftReferences)
{
    GcHeap *gcHeap = gDvm.gcHeap;
    Object *softReferences;
    Object *weakReferences;
    Object *phantomReferences;

    u8 now;
    s8 timeSinceLastGc;
    s8 gcElapsedTime;
    int numFreed;
    size_t sizeFreed;

#if DVM_TRACK_HEAP_MARKING
    /* Since weak and soft references are always cleared,
     * they don't require any marking.
     * (Soft are lumped into strong when they aren't cleared.)
     */
    size_t strongMarkCount = 0;
    size_t strongMarkSize = 0;
    size_t finalizeMarkCount = 0;
    size_t finalizeMarkSize = 0;
    size_t phantomMarkCount = 0;
    size_t phantomMarkSize = 0;
#endif

    /* The heap lock must be held.
     */

    /* Guard against re-entry; the gcRunning flag is our recursion check. */
    if (gcHeap->gcRunning) {
        LOGW_HEAP("Attempted recursive GC\n");
        return;
    }
    gcHeap->gcRunning = true;
    now = dvmGetRelativeTimeUsec();
    if (gcHeap->gcStartTime != 0) {
        timeSinceLastGc = (now - gcHeap->gcStartTime) / 1000;
    } else {
        timeSinceLastGc = 0;
    }
    gcHeap->gcStartTime = now;

    LOGV_HEAP("GC starting -- suspending threads\n");

    dvmSuspendAllThreads(SUSPEND_FOR_GC);

    /* Get the priority (the "nice" value) of the current thread.  The
     * getpriority() call can legitimately return -1, so we have to
     * explicitly test errno.
     */
    errno = 0;
    int oldThreadPriority = kInvalidPriority;
    int priorityResult = getpriority(PRIO_PROCESS, 0);
    if (errno != 0) {
        LOGI_HEAP("getpriority(self) failed: %s\n", strerror(errno));
    } else if (priorityResult > ANDROID_PRIORITY_NORMAL) {
        /* Current value is numerically greater than "normal", which
         * in backward UNIX terms means lower priority.
         */
        if (setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL) != 0) {
            LOGI_HEAP("Unable to elevate priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
        } else {
            /* priority elevated; save value so we can restore it later */
            LOGD_HEAP("Elevating priority from %d to %d\n",
                priorityResult, ANDROID_PRIORITY_NORMAL);
            oldThreadPriority = priorityResult;
        }
    }

    /* Wait for the HeapWorker thread to block.
     * (It may also already be suspended in interp code,
     * in which case it's not holding heapWorkerLock.)
     */
    dvmLockMutex(&gDvm.heapWorkerLock);

    /* Make sure that the HeapWorker thread hasn't become
     * wedged inside interp code.  If it has, this call will
     * print a message and abort the VM.
     */
    dvmAssertHeapWorkerThreadRunning();

    /* Lock the pendingFinalizationRefs list.
     *
     * Acquire the lock after suspending so the finalizer
     * thread can't block in the RUNNING state while
     * we try to suspend.
     */
    dvmLockMutex(&gDvm.heapWorkerListLock);

#ifdef WITH_PROFILER
    dvmMethodTraceGCBegin();
#endif

#if WITH_HPROF

/* Set DUMP_HEAP_ON_DDMS_UPDATE to 1 to enable heap dumps
 * whenever DDMS requests a heap update (HPIF chunk).
 * The output files will appear in /data/misc, which must
 * already exist.
 * You must define "WITH_HPROF := true" in your buildspec.mk
 * and recompile libdvm for this to work.
 *
 * To enable stack traces for each allocation, define
 * "WITH_HPROF_STACK := true" in buildspec.mk.  This option slows down
 * allocations and also requires 8 additional bytes per object on the
 * GC heap.
 */
#define DUMP_HEAP_ON_DDMS_UPDATE 0
#if DUMP_HEAP_ON_DDMS_UPDATE
    gcHeap->hprofDumpOnGc |= (gcHeap->ddmHpifWhen != 0);
#endif

    if (gcHeap->hprofDumpOnGc) {
        char nameBuf[128];

        /* Pessimistically assume failure; cleared on successful shutdown. */
        gcHeap->hprofResult = -1;

        if (gcHeap->hprofFileName == NULL) {
            /* no filename was provided; invent one.
             * nameBuf is stack-local, but hprofFileName is reset to
             * NULL below before it can dangle past this block.
             */
            sprintf(nameBuf, "/data/misc/heap-dump-tm%d-pid%d.hprof",
                (int) time(NULL), (int) getpid());
            gcHeap->hprofFileName = nameBuf;
        }
        gcHeap->hprofContext = hprofStartup(gcHeap->hprofFileName);
        if (gcHeap->hprofContext != NULL) {
            hprofStartHeapDump(gcHeap->hprofContext);
        }
        gcHeap->hprofDumpOnGc = false;
        gcHeap->hprofFileName = NULL;
    }
#endif

    if (timeSinceLastGc < 10000) {
        LOGD_HEAP("GC! (%dms since last GC)\n",
                (int)timeSinceLastGc);
    } else {
        LOGD_HEAP("GC! (%d sec since last GC)\n",
                (int)(timeSinceLastGc / 1000));
    }
#if DVM_TRACK_HEAP_MARKING
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Set up the marking context.
     */
    if (!dvmHeapBeginMarkStep()) {
        LOGE_HEAP("dvmHeapBeginMarkStep failed; aborting\n");
        dvmAbort();
    }

    /* Mark the set of objects that are strongly reachable from the roots.
     */
    LOGD_HEAP("Marking...");
    dvmHeapMarkRootSet();

    /* dvmHeapScanMarkedObjects() will build the lists of known
     * instances of the Reference classes.
     */
    gcHeap->softReferences = NULL;
    gcHeap->weakReferences = NULL;
    gcHeap->phantomReferences = NULL;

    /* Make sure that we don't hard-mark the referents of Reference
     * objects by default.
     */
    gcHeap->markAllReferents = false;

    /* Don't mark SoftReferences if our caller wants us to collect them.
     * This has to be set before calling dvmHeapScanMarkedObjects().
     */
    if (collectSoftReferences) {
        gcHeap->softReferenceCollectionState = SR_COLLECT_ALL;
    }

    /* Recursively mark any objects that marked objects point to strongly.
     * If we're not collecting soft references, soft-reachable
     * objects will also be marked.
     */
    LOGD_HEAP("Recursing...");
    dvmHeapScanMarkedObjects();
#if DVM_TRACK_HEAP_MARKING
    strongMarkCount = gcHeap->markCount;
    strongMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Latch these so that the other calls to dvmHeapScanMarkedObjects() don't
     * mess with them.
     */
    softReferences = gcHeap->softReferences;
    weakReferences = gcHeap->weakReferences;
    phantomReferences = gcHeap->phantomReferences;

    /* All strongly-reachable objects have now been marked.
     */
    if (gcHeap->softReferenceCollectionState != SR_COLLECT_NONE) {
        LOGD_HEAP("Handling soft references...");
        dvmHeapHandleReferences(softReferences, REF_SOFT);
        // markCount always zero

        /* Now that we've tried collecting SoftReferences,
         * fall back to not collecting them.  If the heap
         * grows, we will start collecting again.
         */
        gcHeap->softReferenceCollectionState = SR_COLLECT_NONE;
    } // else dvmHeapScanMarkedObjects() already marked the soft-reachable set
    LOGD_HEAP("Handling weak references...");
    dvmHeapHandleReferences(weakReferences, REF_WEAK);
    // markCount always zero

    /* Once all weak-reachable objects have been taken
     * care of, any remaining unmarked objects can be finalized.
     */
    LOGD_HEAP("Finding finalizations...");
    dvmHeapScheduleFinalizations();
#if DVM_TRACK_HEAP_MARKING
    finalizeMarkCount = gcHeap->markCount;
    finalizeMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

    /* Any remaining objects that are not pending finalization
     * could be phantom-reachable.  This will mark any phantom-reachable
     * objects, as well as enqueue their references.
     */
    LOGD_HEAP("Handling phantom references...");
    dvmHeapHandleReferences(phantomReferences, REF_PHANTOM);
#if DVM_TRACK_HEAP_MARKING
    phantomMarkCount = gcHeap->markCount;
    phantomMarkSize = gcHeap->markSize;
    gcHeap->markCount = 0;
    gcHeap->markSize = 0;
#endif

//TODO: take care of JNI weak global references

#if DVM_TRACK_HEAP_MARKING
    LOGI_HEAP("Marked objects: %dB strong, %dB final, %dB phantom\n",
            strongMarkSize, finalizeMarkSize, phantomMarkSize);
#endif

#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("before sweep");
#endif
    LOGD_HEAP("Sweeping...");
    dvmHeapSweepUnmarkedObjects(&numFreed, &sizeFreed);
#ifdef WITH_DEADLOCK_PREDICTION
    dvmDumpMonitorInfo("after sweep");
#endif

    LOGD_HEAP("Cleaning up...");
    dvmHeapFinishMarkStep();

    LOGD_HEAP("Done.");

    /* Now's a good time to adjust the heap size, since
     * we know what our utilization is.
     *
     * This doesn't actually resize any memory;
     * it just lets the heap grow more when necessary.
     */
    dvmHeapSourceGrowForUtilization();
    dvmHeapSizeChanged();

#if WITH_HPROF
    if (gcHeap->hprofContext != NULL) {
        hprofFinishHeapDump(gcHeap->hprofContext);
//TODO: write a HEAP_SUMMARY record
        if (hprofShutdown(gcHeap->hprofContext))
            gcHeap->hprofResult = 0;    /* indicate success */
        gcHeap->hprofContext = NULL;
    }
#endif

    /* Now that we've freed up the GC heap, return any large
     * free chunks back to the system.  They'll get paged back
     * in the next time they're used.  Don't do it immediately,
     * though; if the process is still allocating a bunch of
     * memory, we'll be taking a ton of page faults that we don't
     * necessarily need to.
     *
     * Cancel any old scheduled trims, and schedule a new one.
     */
    dvmScheduleHeapSourceTrim(5);  // in seconds

#ifdef WITH_PROFILER
    dvmMethodTraceGCEnd();
#endif
    LOGV_HEAP("GC finished -- resuming threads\n");

    gcHeap->gcRunning = false;

    /* Unlock in the reverse order of acquisition, then let the world run. */
    dvmUnlockMutex(&gDvm.heapWorkerListLock);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    dvmResumeAllThreads(SUSPEND_FOR_GC);

    /* Restore the original priority if we boosted it earlier. */
    if (oldThreadPriority != kInvalidPriority) {
        if (setpriority(PRIO_PROCESS, 0, oldThreadPriority) != 0) {
            LOGW_HEAP("Unable to reset priority to %d: %s\n",
                oldThreadPriority, strerror(errno));
        } else {
            LOGD_HEAP("Reset priority to %d\n", oldThreadPriority);
        }
    }
    gcElapsedTime = (dvmGetRelativeTimeUsec() - gcHeap->gcStartTime) / 1000;
    if (gcElapsedTime < 10000) {
        LOGD("GC freed %d objects / %zd bytes in %dms\n",
                numFreed, sizeFreed, (int)gcElapsedTime);
    } else {
        LOGD("GC freed %d objects / %zd bytes in %d sec\n",
                numFreed, sizeFreed, (int)(gcElapsedTime / 1000));
    }
    dvmLogGcStats(numFreed, sizeFreed, gcElapsedTime);

    /* Push post-GC heap state to DDMS if any of the reports are enabled. */
    if (gcHeap->ddmHpifWhen != 0) {
        LOGD_HEAP("Sending VM heap info to DDM\n");
        dvmDdmSendHeapInfo(gcHeap->ddmHpifWhen, false);
    }
    if (gcHeap->ddmHpsgWhen != 0) {
        LOGD_HEAP("Dumping VM heap to DDM\n");
        dvmDdmSendHeapSegments(false, false);
    }
    if (gcHeap->ddmNhsgWhen != 0) {
        LOGD_HEAP("Dumping native heap to DDM\n");
        dvmDdmSendHeapSegments(false, true);
    }
}
1050
1051#if WITH_HPROF
1052/*
1053 * Perform garbage collection, writing heap information to the specified file.
1054 *
1055 * If "fileName" is NULL, a suitable name will be generated automatically.
The Android Open Source Project99409882009-03-18 22:20:24 -07001056 *
1057 * Returns 0 on success, or an error code on failure.
The Android Open Source Projectf6c38712009-03-03 19:28:47 -08001058 */
int hprofDumpHeap(const char* fileName)
{
    int result;

    /* Take the heap lock directly (not dvmLockHeap) and arm the
     * dump-on-GC flags; the collection below performs the actual dump
     * and stores its status in hprofResult.
     */
    dvmLockMutex(&gDvm.gcHeapLock);

    gDvm.gcHeap->hprofDumpOnGc = true;
    gDvm.gcHeap->hprofFileName = fileName;
    dvmCollectGarbageInternal(false);
    result = gDvm.gcHeap->hprofResult;

    dvmUnlockMutex(&gDvm.gcHeapLock);

    return result;
}
1074
1075void dvmHeapSetHprofGcScanState(hprof_heap_tag_t state, u4 threadSerialNumber)
1076{
1077 if (gDvm.gcHeap->hprofContext != NULL) {
1078 hprofSetGcScanState(gDvm.gcHeap->hprofContext, state,
1079 threadSerialNumber);
1080 }
1081}
1082#endif