Invoke a padding round in certain use-cases.
* In some use-cases, it is possible that there are
no AppBuffer layers on the external/virtual layer-list,
during which time all the pipes get allocated to the
primary. When layers do come up on the external/virtual
layer-list, subsequent overlay sets fail.
* This change ensures that in such cases, we invoke a
padding round on all the displays to free up the
hw resources which can be used in subsequent cycles.
Change-Id: Ifac0b8f51a8719eb55b11010d05b8d11352db054
diff --git a/libhwcomposer/hwc.cpp b/libhwcomposer/hwc.cpp
index 5c9f2d1..80e7803 100644
--- a/libhwcomposer/hwc.cpp
+++ b/libhwcomposer/hwc.cpp
@@ -105,7 +105,10 @@
//Helper
static void reset(hwc_context_t *ctx, int numDisplays,
hwc_display_contents_1_t** displays) {
+
ctx->numActiveDisplays = 0;
+ ctx->isPaddingRound = false;
+
for(int i = 0; i < numDisplays; i++) {
hwc_display_contents_1_t *list = displays[i];
// XXX:SurfaceFlinger no longer guarantees that this
@@ -125,6 +128,17 @@
* the display device to be active.
*/
ctx->numActiveDisplays += 1;
+
+ if((ctx->mPrevHwLayerCount[i] == 1) and (list->numHwLayers > 1)) {
+ /* If the previous cycle for dpy 'i' has 0 AppLayers and the
+ * current cycle has at least 1 AppLayer, padding round needs
+ * to be invoked on current cycle to free up the resources.
+ */
+ ctx->isPaddingRound = true;
+ }
+ ctx->mPrevHwLayerCount[i] = list->numHwLayers;
+ } else {
+ ctx->mPrevHwLayerCount[i] = 0;
}
if(ctx->mFBUpdate[i])
@@ -133,7 +147,6 @@
ctx->mCopyBit[i]->reset();
if(ctx->mLayerRotMap[i])
ctx->mLayerRotMap[i]->reset();
-
}
ctx->mAD->reset();
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 4f6099e..53d614f 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -419,8 +419,8 @@
__FUNCTION__);
ret = false;
} else if(ctx->isPaddingRound) {
- ctx->isPaddingRound = false;
- ALOGD_IF(isDebug(), "%s: padding round",__FUNCTION__);
+ ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
+ __FUNCTION__,mDpy);
ret = false;
}
return ret;
@@ -1410,11 +1410,10 @@
const int numLayers = ctx->listStats[mDpy].numAppLayers;
MDPVersion& mdpVersion = qdutils::MDPVersion::getInstance();
- //number of app layers exceeds MAX_NUM_APP_LAYERS fall back to GPU
- //do not cache the information for next draw cycle.
- if(numLayers > MAX_NUM_APP_LAYERS) {
- ALOGI("%s: Number of App layers exceeded the limit ",
- __FUNCTION__);
+ //Do not cache the information for next draw cycle.
+ if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
+ ALOGI("%s: Unsupported layer count for mdp composition",
+ __FUNCTION__);
mCachedFrame.reset();
return -1;
}
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index d30984b..4ebf685 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -213,6 +213,10 @@
ctx->dpyAttr[i].mAsHeightRatio = 0;
}
+ for (uint32_t i = 0; i < HWC_NUM_DISPLAY_TYPES; i++) {
+ ctx->mPrevHwLayerCount[i] = 0;
+ }
+
MDPComp::init(ctx);
ctx->mAD = new AssertiveDisplay(ctx);
@@ -897,6 +901,7 @@
if(prevYuvCount != ctx->listStats[dpy].yuvCount) {
ctx->mVideoTransFlag = true;
}
+
if(dpy == HWC_DISPLAY_PRIMARY) {
ctx->mAD->markDoable(ctx, list);
}
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 3393b59..25d1c21 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -483,6 +483,10 @@
eAnimationState mAnimationState[HWC_NUM_DISPLAY_TYPES];
qhwc::HWCVirtualBase *mHWCVirtual;
+ // stores the #numHwLayers of the previous frame
+ // for each display device
+ int mPrevHwLayerCount[HWC_NUM_DISPLAY_TYPES];
+
// stores the primary device orientation
int deviceOrientation;
//Securing in progress indicator