Merge "liboverlay: explicitly set alignment to 1M for secure buffers"
diff --git a/libexternal/external.cpp b/libexternal/external.cpp
index 609f9c1..045edd8 100644
--- a/libexternal/external.cpp
+++ b/libexternal/external.cpp
@@ -588,7 +588,9 @@
// if primary resolution is more than the hdmi resolution
// configure dpy attr to primary resolution and set
// downscale mode
- if((priW * priH) > (width * height)) {
+ // Restrict this up to 1080p resolution max
+ if(((priW * priH) > (width * height)) &&
+ (priW <= qdutils::MAX_DISPLAY_DIM)) {
mHwcContext->dpyAttr[HWC_DISPLAY_EXTERNAL].xres = priW;
mHwcContext->dpyAttr[HWC_DISPLAY_EXTERNAL].yres = priH;
// HDMI is always in landscape, so always assign the higher
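The guard above enables downscale mode only when the primary panel carries more pixels than the HDMI mode and the primary width still fits a single MDP pipe. A minimal standalone sketch of the decision, assuming qdutils::MAX_DISPLAY_DIM is 2048 and using illustrative resolutions:

    // Sketch of the downscale-mode decision; MAX_DISPLAY_DIM mirrors
    // the qdutils constant (assumed 2048 here).
    #include <cstdio>

    static const int MAX_DISPLAY_DIM = 2048;

    static bool useDownscaleMode(int priW, int priH, int extW, int extH) {
        // Downscale only if primary has more pixels than the HDMI mode
        // and the primary width fits in one MDP pipe.
        return ((priW * priH) > (extW * extH)) && (priW <= MAX_DISPLAY_DIM);
    }

    int main() {
        printf("%d\n", useDownscaleMode(1920, 1080, 1280, 720));  // 1: engage
        printf("%d\n", useDownscaleMode(2560, 1600, 1920, 1080)); // 0: too wide
        return 0;
    }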
diff --git a/libgralloc/alloc_controller.cpp b/libgralloc/alloc_controller.cpp
index 9af45e2..98047e4 100644
--- a/libgralloc/alloc_controller.cpp
+++ b/libgralloc/alloc_controller.cpp
@@ -184,12 +184,6 @@
IonController::IonController()
{
mIonAlloc = new IonAlloc();
- mUseTZProtection = false;
- char property[PROPERTY_VALUE_MAX];
- if ((property_get("persist.gralloc.cp.level3", property, NULL) <= 0) ||
- (atoi(property) != 1)) {
- mUseTZProtection = true;
- }
}
int IonController::allocate(alloc_data& data, int usage)
@@ -210,7 +204,7 @@
ionFlags |= ION_HEAP(ION_IOMMU_HEAP_ID);
if(usage & GRALLOC_USAGE_PROTECTED) {
- if ((mUseTZProtection) && (usage & GRALLOC_USAGE_PRIVATE_MM_HEAP)) {
+ if (usage & GRALLOC_USAGE_PRIVATE_MM_HEAP) {
ionFlags |= ION_HEAP(ION_CP_MM_HEAP_ID);
ionFlags |= ION_SECURE;
} else {
diff --git a/libgralloc/alloc_controller.h b/libgralloc/alloc_controller.h
index 8954d39..5fe81fa 100644
--- a/libgralloc/alloc_controller.h
+++ b/libgralloc/alloc_controller.h
@@ -65,7 +65,6 @@
private:
IonAlloc* mIonAlloc;
- bool mUseTZProtection;
};
} //end namespace gralloc
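With mUseTZProtection gone, heap selection for protected allocations is a pure function of the usage bits: protected + MM heap now always yields a secure CP-heap allocation, with no property-driven opt-out. A reduced sketch of that logic; the bit values below are placeholders for the real GRALLOC_USAGE_* flags and ION macros:

    #include <cstdint>
    #include <cstdio>

    // Placeholder bits; the real values come from gralloc_priv.h and
    // the ion kernel headers (ION_HEAP(), ION_SECURE).
    enum : uint32_t {
        USAGE_PROTECTED       = 1u << 0,
        USAGE_PRIVATE_MM_HEAP = 1u << 1,
        FLAG_CP_MM_HEAP       = 1u << 8,
        FLAG_SECURE           = 1u << 9,
    };

    static uint32_t protectedIonFlags(uint32_t usage) {
        uint32_t ionFlags = 0;
        // Unconditional now; the persist.gralloc.cp.level3 override
        // no longer exists.
        if ((usage & USAGE_PROTECTED) && (usage & USAGE_PRIVATE_MM_HEAP))
            ionFlags |= FLAG_CP_MM_HEAP | FLAG_SECURE;
        return ionFlags;
    }

    int main() {
        printf("0x%x\n",
               protectedIonFlags(USAGE_PROTECTED | USAGE_PRIVATE_MM_HEAP)); // 0x300
        return 0;
    }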
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index ff33e94..e65d5e7 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -576,14 +576,26 @@
bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
- int numAppLayers = ctx->listStats[mDpy].numAppLayers;
-
if(!sEnableMixedMode) {
//Mixed mode is disabled. No need to even try caching.
return false;
}
- //Setup mCurrentFrame
+ bool ret = false;
+ if(isLoadBasedCompDoable(ctx, list)) {
+ ret = loadBasedComp(ctx, list);
+ }
+
+ if(!ret) {
+ ret = cacheBasedComp(ctx, list);
+ }
+
+ return ret;
+}
+
+bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
mCurrentFrame.reset(numAppLayers);
updateLayerCache(ctx, list);
@@ -633,6 +645,77 @@
return true;
}
+bool MDPComp::loadBasedComp(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ mCurrentFrame.reset(numAppLayers);
+
+ //TODO BatchSize could be optimized further based on available pipes, split
+ //displays etc.
+ const int batchSize = numAppLayers - (sMaxPipesPerMixer - 1);
+ if(batchSize <= 0) {
+ ALOGD_IF(isDebug(), "%s: Not attempting", __FUNCTION__);
+ return false;
+ }
+
+ int minBatchStart = -1;
+ size_t minBatchPixelCount = SIZE_MAX;
+
+ for(int i = 0; i <= numAppLayers - batchSize; i++) {
+ uint32_t batchPixelCount = 0;
+ for(int j = i; j < i + batchSize; j++) {
+ hwc_layer_1_t* layer = &list->hwLayers[j];
+ hwc_rect_t crop = layer->sourceCrop;
+ batchPixelCount += (crop.right - crop.left) *
+ (crop.bottom - crop.top);
+ }
+
+ if(batchPixelCount < minBatchPixelCount) {
+ minBatchPixelCount = batchPixelCount;
+ minBatchStart = i;
+ }
+ }
+
+ if(minBatchStart < 0) {
+ ALOGD_IF(isDebug(), "%s: No batch found batchSize %d numAppLayers %d",
+ __FUNCTION__, batchSize, numAppLayers);
+ return false;
+ }
+
+ for(int i = 0; i < numAppLayers; i++) {
+ if(i < minBatchStart || i >= minBatchStart + batchSize) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: MDP unsupported layer found at %d",
+ __FUNCTION__, i);
+ return false;
+ }
+ mCurrentFrame.isFBComposed[i] = false;
+ }
+ }
+
+ mCurrentFrame.fbZ = minBatchStart;
+ mCurrentFrame.fbCount = batchSize;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - batchSize;
+
+ if(!arePipesAvailable(ctx, list)) {
+ return false;
+ }
+
+ ALOGD_IF(isDebug(), "%s: fbZ %d batchSize %d",
+ __FUNCTION__, mCurrentFrame.fbZ, batchSize);
+ return true;
+}
+
+bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ if(mDpy or isSecurePresent(ctx, mDpy) or
+ not (list->flags & HWC_GEOMETRY_CHANGED)) {
+ return false;
+ }
+ return true;
+}
+
bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
hwc_display_contents_1_t* list, bool secureOnly) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
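loadBasedComp() above is a sliding-window minimum search: with sMaxPipesPerMixer pipes available, all but (sMaxPipesPerMixer - 1) layers must collapse into one contiguous FB batch, and the window with the smallest source pixel count wins so the GPU composes as little as possible. A standalone sketch of just that search, with layers reduced to their crop pixel counts and illustrative values:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Returns the start index (fbZ) of the cheapest contiguous batch,
    // or -1 if no FB batch is needed or possible.
    static int findMinLoadBatch(const std::vector<uint32_t>& pixels,
                                int maxPipesPerMixer) {
        const int n = (int)pixels.size();
        const int batchSize = n - (maxPipesPerMixer - 1);
        if (batchSize <= 0)
            return -1; // every layer fits on an MDP pipe

        int minStart = -1;
        uint64_t minCount = UINT64_MAX;
        for (int i = 0; i + batchSize <= n; i++) {
            uint64_t count = 0;
            for (int j = i; j < i + batchSize; j++)
                count += pixels[j]; // sum of source-crop pixels in window
            if (count < minCount) {
                minCount = count;
                minStart = i;
            }
        }
        return minStart;
    }

    int main() {
        // Five layers, four pipes per mixer -> FB batch of two layers;
        // layers 2..3 are the cheapest to leave on the GPU.
        std::vector<uint32_t> px = {2073600, 921600, 8100, 230400, 1036800};
        printf("fbZ = %d\n", findMinLoadBatch(px, 4)); // prints fbZ = 2
        return 0;
    }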
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index e1839cd..3882bee 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -144,6 +144,13 @@
bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
/* check if we can use layer cache to do at least partial MDP comp */
bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* Partial MDP comp that uses caching, with power savings as the primary goal */
+ bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* Partial MDP comp that minimizes GPU pixel load, with performance as the primary goal */
+ bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* Checks whether it's worth doing load based partial comp */
+ bool isLoadBasedCompDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
/* checks for conditions where only video can be bypassed */
bool isOnlyVideoDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list,
bool secureOnly);
diff --git a/libhwcomposer/hwc_uevents.cpp b/libhwcomposer/hwc_uevents.cpp
index 8c3d00d..8906216 100644
--- a/libhwcomposer/hwc_uevents.cpp
+++ b/libhwcomposer/hwc_uevents.cpp
@@ -141,6 +141,7 @@
"event", __FUNCTION__);
ctx->proc->hotplug(ctx->proc, HWC_DISPLAY_EXTERNAL,
EXTERNAL_OFFLINE);
+ ctx->mVirtualonExtActive = false;
}
break;
}
@@ -309,7 +310,10 @@
char thread_name[64] = HWC_UEVENT_THREAD_NAME;
prctl(PR_SET_NAME, (unsigned long) &thread_name, 0, 0, 0);
setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY);
- uevent_init();
+ if(!uevent_init()) {
+ ALOGE("%s: failed to init uevent ",__FUNCTION__);
+ return NULL;
+ }
while(1) {
len = uevent_next_event(udata, sizeof(udata) - 2);
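The uevent thread now exits when the netlink socket cannot be opened instead of spinning on uevent_next_event() forever. A sketch of the guarded loop shape; the prototypes are assumed from libhardware_legacy's uevent API and the event dispatch is elided:

    #include <cstdio>
    #include <cstring>

    // Assumed prototypes from hardware_legacy/uevent.h.
    extern "C" int uevent_init();
    extern "C" int uevent_next_event(char* buffer, int buffer_length);

    static void* uevent_loop(void*) {
        if (!uevent_init()) {
            // Bail out rather than poll a socket that never opened.
            fprintf(stderr, "uevent_loop: failed to init uevent\n");
            return NULL;
        }
        static char udata[4096];
        while (true) {
            memset(udata, 0, sizeof(udata));
            int len = uevent_next_event(udata, sizeof(udata) - 2);
            if (len <= 0)
                continue;
            // ... match the fb change@ event string and dispatch
            // hotplug/blank handling ...
        }
        return NULL;
    }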
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index 7e0ecd8..f5dc70c 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -704,6 +704,7 @@
ctx->listStats[dpy].skipCount = 0;
ctx->listStats[dpy].needsAlphaScale = false;
ctx->listStats[dpy].preMultipliedAlpha = false;
+ ctx->listStats[dpy].isSecurePresent = false;
ctx->listStats[dpy].yuvCount = 0;
char property[PROPERTY_VALUE_MAX];
ctx->listStats[dpy].extOnlyLayerIndex = -1;
@@ -733,6 +734,10 @@
//reset yuv indices
ctx->listStats[dpy].yuvIndices[i] = -1;
+ if (isSecureBuffer(hnd)) {
+ ctx->listStats[dpy].isSecurePresent = true;
+ }
+
if (isSkipLayer(&list->hwLayers[i])) {
ctx->listStats[dpy].skipCount++;
}
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 3f1aace..2510e4f 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -103,6 +103,7 @@
bool isDisplayAnimating;
ovutils::Dim roi;
bool secureUI; // Secure display layer
+ bool isSecurePresent;
};
struct LayerProp {
@@ -421,6 +422,10 @@
return (layer->transform & HWC_TRANSFORM_ROT_90);
}
+inline bool isSecurePresent(hwc_context_t *ctx, int dpy) {
+ return ctx->listStats[dpy].isSecurePresent;
+}
+
};
#endif //HWC_UTILS_H
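Taken together, the hwc_utils changes compute a per-display secure-layer flag once per frame in setListStats(), so per-frame consumers such as isLoadBasedCompDoable() get an O(1) query instead of rescanning the layer list. A reduced sketch of the producer/consumer pair, with the context structs trimmed to what the logic needs:

    #include <vector>

    struct ListStats { bool isSecurePresent; };
    struct Context   { ListStats listStats[2]; }; // [0] primary, [1] external

    // Producer: one pass per frame, mirroring setListStats().
    static void setListStats(Context& ctx, int dpy,
                             const std::vector<bool>& layerIsSecure) {
        ctx.listStats[dpy].isSecurePresent = false; // reset every frame
        for (size_t i = 0; i < layerIsSecure.size(); i++)
            if (layerIsSecure[i])
                ctx.listStats[dpy].isSecurePresent = true;
    }

    // Consumer: O(1) query, mirroring the inline helper in hwc_utils.h.
    static inline bool isSecurePresent(const Context& ctx, int dpy) {
        return ctx.listStats[dpy].isSecurePresent;
    }

    // Load based comp is skipped on secondary displays, on frames with
    // secure layers, and when geometry did not change, as in the diff.
    static bool isLoadBasedCompDoable(const Context& ctx, int dpy,
                                      bool geometryChanged) {
        return !(dpy || isSecurePresent(ctx, dpy) || !geometryChanged);
    }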