Eliminate the heap chunk abstraction. This temporarily breaks the
hprof feature that allows stack traces to be associated with objects.
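Background: with the chunk abstraction, every object handed out by the
heap source is preceded by a small DvmHeapChunk header (which, in
WITH_HPROF && WITH_HPROF_STACK builds, holds the hprof stack-trace
serial number).  Allocation sites request size + sizeof(DvmHeapChunk),
and every consumer converts between the header and the object with the
ptr2chunk()/chunk2ptr() macros.  After this change the pointer returned
by the heap source is the Object pointer itself.  The standalone sketch
below illustrates the two layouts; it is not code from the VM (plain
malloc stands in for dvmHeapSourceAlloc(), and Object is reduced to a
stub), but DvmHeapChunk, ptr2chunk() and chunk2ptr() mirror the
definitions removed from HeapInternal.h.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Object { uintptr_t clazz; } Object;   /* stub type */

    /* Old layout: a header precedes the object.  In the real code the
     * field exists only in WITH_HPROF && WITH_HPROF_STACK builds.
     */
    typedef struct DvmHeapChunk {
        uint32_t stackTraceSerialNumber;
        uint64_t data[0];
    } DvmHeapChunk;

    #define ptr2chunk(p) (((DvmHeapChunk *)(p)) - 1)
    #define chunk2ptr(p) ((void *)(((DvmHeapChunk *)(p)) + 1))

    int main(void)
    {
        /* Before: allocate header + object, hand out the interior
         * pointer, and convert back with ptr2chunk() whenever the
         * header is needed.
         */
        DvmHeapChunk *hc = malloc(sizeof(DvmHeapChunk) + sizeof(Object));
        Object *obj = chunk2ptr(hc);
        ptr2chunk(obj)->stackTraceSerialNumber = 42;

        /* After: the allocator returns the object address directly, so
         * the mark bitmaps, hprof, and dvmHeapSourceChunkSize() all
         * take the Object pointer, and the header (and with it the
         * per-object stack trace) goes away.
         */
        Object *direct = malloc(sizeof(Object));

        printf("header=%p object=%p direct=%p\n",
               (void *)hc, (void *)obj, (void *)direct);
        free(hc);
        free(direct);
        return 0;
    }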
diff --git a/vm/alloc/DdmHeap.c b/vm/alloc/DdmHeap.c
index f21a875..6b11757 100644
--- a/vm/alloc/DdmHeap.c
+++ b/vm/alloc/DdmHeap.c
@@ -273,8 +273,7 @@
*/
state = HPSG_STATE(SOLIDITY_FREE, 0);
} else {
- const DvmHeapChunk *hc = (const DvmHeapChunk *)userptr;
- const Object *obj = chunk2ptr(hc);
+ const Object *obj = userptr;
/* If we're looking at the native heap, we'll just return
* (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
*/
diff --git a/vm/alloc/Heap.c b/vm/alloc/Heap.c
index de43ecf..28aff0a 100644
--- a/vm/alloc/Heap.c
+++ b/vm/alloc/Heap.c
@@ -329,9 +329,9 @@
/* Try as hard as possible to allocate some memory.
*/
-static DvmHeapChunk *tryMalloc(size_t size)
+static void *tryMalloc(size_t size)
{
- DvmHeapChunk *hc;
+ void *ptr;
/* Don't try too hard if there's no way the allocation is
* going to succeed. We have to collect SoftReferences before
@@ -340,7 +340,7 @@
if (size >= gDvm.heapSizeMax) {
LOGW_HEAP("dvmMalloc(%zu/0x%08zx): "
"someone's allocating a huge buffer\n", size, size);
- hc = NULL;
+ ptr = NULL;
goto collect_soft_refs;
}
@@ -354,9 +354,9 @@
// DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
// (or, at least, there are only 0-5 objects swept each time)
- hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
- if (hc != NULL) {
- return hc;
+ ptr = dvmHeapSourceAlloc(size);
+ if (ptr != NULL) {
+ return ptr;
}
/* The allocation failed. Free up some space by doing
@@ -364,17 +364,17 @@
* if the live set is sufficiently large.
*/
gcForMalloc(false);
- hc = dvmHeapSourceAlloc(size + sizeof(DvmHeapChunk));
- if (hc != NULL) {
- return hc;
+ ptr = dvmHeapSourceAlloc(size);
+ if (ptr != NULL) {
+ return ptr;
}
/* Even that didn't work; this is an exceptional state.
* Try harder, growing the heap if necessary.
*/
- hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
+ ptr = dvmHeapSourceAllocAndGrow(size);
dvmHeapSizeChanged();
- if (hc != NULL) {
+ if (ptr != NULL) {
size_t newHeapSize;
newHeapSize = dvmHeapSourceGetIdealFootprint();
@@ -384,7 +384,7 @@
LOGI_HEAP("Grow heap (frag case) to "
"%zu.%03zuMB for %zu-byte allocation\n",
FRACTIONAL_MB(newHeapSize), size);
- return hc;
+ return ptr;
}
/* Most allocations should have succeeded by now, so the heap
@@ -398,10 +398,10 @@
LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation\n",
size);
gcForMalloc(true);
- hc = dvmHeapSourceAllocAndGrow(size + sizeof(DvmHeapChunk));
+ ptr = dvmHeapSourceAllocAndGrow(size);
dvmHeapSizeChanged();
- if (hc != NULL) {
- return hc;
+ if (ptr != NULL) {
+ return ptr;
}
//TODO: maybe wait for finalizers and try one last time
@@ -493,7 +493,6 @@
void* dvmMalloc(size_t size, int flags)
{
GcHeap *gcHeap = gDvm.gcHeap;
- DvmHeapChunk *hc;
void *ptr;
#if 0
@@ -547,8 +546,8 @@
/* Try as hard as possible to allocate some memory.
*/
- hc = tryMalloc(size);
- if (hc != NULL) {
+ ptr = tryMalloc(size);
+ if (ptr != NULL) {
/* We've got the memory.
*/
if ((flags & ALLOC_FINALIZABLE) != 0) {
@@ -561,7 +560,7 @@
* set. scanObject() explicitly deals with the NULL clazz.
*/
if (!dvmHeapAddRefToLargeTable(&gcHeap->finalizableRefs,
- (Object *)hc->data))
+ (Object *)ptr))
{
LOGE_HEAP("dvmMalloc(): no room for any more "
"finalizable objects\n");
@@ -569,8 +568,6 @@
}
}
- ptr = hc->data;
-
/* The caller may not want us to collect this object.
* If not, throw it in the nonCollectableRefs table, which
* will be added to the root set when we GC.
@@ -604,7 +601,6 @@
} else {
/* The allocation failed.
*/
- ptr = NULL;
#ifdef WITH_PROFILER
if (gDvm.allocProf.enabled) {
@@ -647,12 +643,9 @@
*/
bool dvmIsValidObject(const Object* obj)
{
- const DvmHeapChunk *hc;
-
/* Don't bother if it's NULL or not 8-byte aligned.
*/
- hc = ptr2chunk(obj);
- if (obj != NULL && ((uintptr_t)hc & (8-1)) == 0) {
+ if (obj != NULL && ((uintptr_t)obj & (8-1)) == 0) {
/* Even if the heap isn't locked, this shouldn't return
* any false negatives. The only mutation that could
* be happening is allocation, which means that another
@@ -666,7 +659,7 @@
* Freeing will only happen during the sweep phase, which
* only happens while the heap is locked.
*/
- return dvmHeapSourceContains(hc);
+ return dvmHeapSourceContains(obj);
}
return false;
}
@@ -699,7 +692,7 @@
size_t dvmObjectSizeInHeap(const Object *obj)
{
- return dvmHeapSourceChunkSize(ptr2chunk(obj)) - sizeof(DvmHeapChunk);
+ return dvmHeapSourceChunkSize(obj);
}
/*
diff --git a/vm/alloc/HeapInternal.h b/vm/alloc/HeapInternal.h
index 9a5071f..91d5976 100644
--- a/vm/alloc/HeapInternal.h
+++ b/vm/alloc/HeapInternal.h
@@ -26,16 +26,6 @@
#define SCHEDULED_REFERENCE_MAGIC ((Object*)0x87654321)
-#define ptr2chunk(p) (((DvmHeapChunk *)(p)) - 1)
-#define chunk2ptr(p) ((void *)(((DvmHeapChunk *)(p)) + 1))
-
-typedef struct DvmHeapChunk {
-#if WITH_HPROF && WITH_HPROF_STACK
- u4 stackTraceSerialNumber;
-#endif
- u8 data[0];
-} DvmHeapChunk;
-
struct GcHeap {
HeapSource *heapSource;
diff --git a/vm/alloc/MarkSweep.c b/vm/alloc/MarkSweep.c
index 979209b..70b5269 100644
--- a/vm/alloc/MarkSweep.c
+++ b/vm/alloc/MarkSweep.c
@@ -74,11 +74,11 @@
/* Do not cast the result of this to a boolean; the only set bit
* may be > 1<<8.
*/
-static inline long isMarked(const DvmHeapChunk *hc, const GcMarkContext *ctx)
+static inline long isMarked(const void *obj, const GcMarkContext *ctx)
__attribute__((always_inline));
-static inline long isMarked(const DvmHeapChunk *hc, const GcMarkContext *ctx)
+static inline long isMarked(const void *obj, const GcMarkContext *ctx)
{
- return dvmHeapBitmapIsObjectBitSetInList(ctx->bitmaps, ctx->numBitmaps, hc);
+ return dvmHeapBitmapIsObjectBitSetInList(ctx->bitmaps, ctx->numBitmaps, obj);
}
static bool
@@ -165,13 +165,13 @@
return true;
}
-static long setAndReturnMarkBit(GcMarkContext *ctx, const DvmHeapChunk *hc)
+static long setAndReturnMarkBit(GcMarkContext *ctx, const void *obj)
__attribute__((always_inline));
static long
-setAndReturnMarkBit(GcMarkContext *ctx, const DvmHeapChunk *hc)
+setAndReturnMarkBit(GcMarkContext *ctx, const void *obj)
{
return dvmHeapBitmapSetAndReturnObjectBitInList(ctx->bitmaps,
- ctx->numBitmaps, hc);
+ ctx->numBitmaps, obj);
}
static void _markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
@@ -181,8 +181,6 @@
_markObjectNonNullCommon(const Object *obj, GcMarkContext *ctx,
bool checkFinger, bool forceStack)
{
- DvmHeapChunk *hc;
-
assert(obj != NULL);
#if GC_DEBUG(GC_DEBUG_PARANOID)
@@ -191,11 +189,10 @@
assert(dvmIsValidObject(obj));
#endif
- hc = ptr2chunk(obj);
- if (!setAndReturnMarkBit(ctx, hc)) {
+ if (!setAndReturnMarkBit(ctx, obj)) {
/* This object was not previously marked.
*/
- if (forceStack || (checkFinger && (void *)hc < ctx->finger)) {
+ if (forceStack || (checkFinger && (void *)obj < ctx->finger)) {
/* This object will need to go on the mark stack.
*/
MARK_STACK_PUSH(ctx->stack, obj);
@@ -208,7 +205,7 @@
#endif
#if DVM_TRACK_HEAP_MARKING
gDvm.gcHeap->markCount++;
- gDvm.gcHeap->markSize += dvmHeapSourceChunkSize((void *)hc) +
+ gDvm.gcHeap->markSize += dvmHeapSourceChunkSize((void *)obj) +
HEAP_SOURCE_CHUNK_OVERHEAD;
#endif
@@ -558,7 +555,7 @@
referent = dvmGetFieldObject(obj,
gDvm.offJavaLangRefReference_referent);
if (referent != NULL &&
- !isMarked(ptr2chunk(referent), &gcHeap->markContext))
+ !isMarked(referent, &gcHeap->markContext))
{
u4 refFlags;
@@ -706,10 +703,7 @@
ctx->finger = finger;
for (i = 0; i < numPtrs; i++) {
- /* The pointers we're getting back are DvmHeapChunks,
- * not Objects.
- */
- scanObject(chunk2ptr(*ptrs++), ctx);
+ scanObject(*ptrs++, ctx);
}
return true;
@@ -805,7 +799,7 @@
// that fail this initial if(). We need to re-walk
// the list, and it would be nice to avoid the extra
// work.
- if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
+ if (referent != NULL && !isMarked(referent, markContext)) {
bool schedEnqueue;
/* This is the strongest reference that refers to referent.
@@ -911,7 +905,7 @@
next = dvmGetFieldObject(reference, offVmData);
referent = dvmGetFieldObject(reference, offReferent);
- if (referent != NULL && !isMarked(ptr2chunk(referent), markContext)) {
+ if (referent != NULL && !isMarked(referent, markContext)) {
markObjectNonNull(referent, markContext);
scanRequired = true;
@@ -977,10 +971,7 @@
gapRef = ref = finRefs->refs.table;
lastRef = finRefs->refs.nextEntry;
while (ref < lastRef) {
- DvmHeapChunk *hc;
-
- hc = ptr2chunk(*ref);
- if (!isMarked(hc, markContext)) {
+ if (!isMarked(*ref, markContext)) {
if (!dvmHeapAddToHeapRefTable(&newPendingRefs, *ref)) {
//TODO: add the current table and allocate
// a new, smaller one.
@@ -1094,10 +1085,7 @@
for (i = 0; i < numPtrs; i++) {
Object *obj;
- /* The pointers we're getting back are DvmHeapChunks, not
- * Objects.
- */
- obj = (Object *)chunk2ptr(*ptrs++);
+ obj = (Object *)*ptrs++;
hprofMarkRootObject(hctx, obj, 0);
hprofDumpHeapObject(hctx, obj);
@@ -1134,14 +1122,9 @@
void **origPtrs = ptrs;
for (i = 0; i < numPtrs; i++) {
- DvmHeapChunk *hc;
Object *obj;
- /* The pointers we're getting back are DvmHeapChunks, not
- * Objects.
- */
- hc = (DvmHeapChunk *)*ptrs++;
- obj = (Object *)chunk2ptr(hc);
+ obj = (Object *)*ptrs++;
/* NOTE: Dereferencing clazz is dangerous. If obj was the last
* one to reference its class object, the class object could be
@@ -1171,7 +1154,7 @@
{
int chunklen;
ClassObject *clazz = obj->clazz;
- chunklen = dvmHeapSourceChunkSize(hc);
+ chunklen = dvmHeapSourceChunkSize(obj);
- memset(hc, 0xa5, chunklen);
+ memset(obj, 0xa5, chunklen);
obj->clazz = (ClassObject *)((uintptr_t)clazz ^ 0xffffffff);
}
@@ -1189,7 +1172,7 @@
*/
static int isUnmarkedObject(void *object)
{
- return !isMarked(ptr2chunk((uintptr_t)object & ~(HB_OBJECT_ALIGNMENT-1)),
+ return !isMarked((void *)((uintptr_t)object & ~(HB_OBJECT_ALIGNMENT-1)),
&gDvm.gcHeap->markContext);
}
diff --git a/vm/hprof/HprofHeap.c b/vm/hprof/HprofHeap.c
index a69e3c6..935fd26 100644
--- a/vm/hprof/HprofHeap.c
+++ b/vm/hprof/HprofHeap.c
@@ -224,7 +224,7 @@
HprofHeapId desiredHeap;
desiredHeap =
- dvmHeapSourceGetPtrFlag(ptr2chunk(obj), HS_ALLOCATED_IN_ZYGOTE) ?
+ dvmHeapSourceGetPtrFlag(obj, HS_ALLOCATED_IN_ZYGOTE) ?
HPROF_HEAP_ZYGOTE : HPROF_HEAP_APP;
if (ctx->objectsInSegment >= OBJECTS_PER_SEGMENT ||