hwc: Include more cases in partial MDP composition.
When MDP cannot support a layer, we currently send the whole list of
layers to the GPU. If such layers are not updating, they can be cached
on the FB while the rest of the list takes the MDP route.
Example 1: A volume bar over video in 90-degree rotated mode. Since the
volume bar is not h/w accelerated, there is no pre-rotation, and we
invoke the GPU for redraws every round. With this change, caching
avoids those redraws.
Example 2: If pre-rotation is applied to layers on the primary display,
the corresponding secondary layers carry a transform and the whole list
is redrawn with the GPU. Caching can now be used here as well.
This change also imposes a 1-layer limit on secondary displays, but now
allows both full and partial MDP composition there, benefiting
secondary-only content.
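
For reference, a minimal standalone sketch of the batching idea this
builds on (hypothetical maxCachedBatch() helper over a plain bool
array, not the actual FrameInfo/LayerCache bookkeeping):

    #include <utility>
    #include <vector>

    // Longest contiguous run of non-updating (cached) layers; these stay
    // on the FB as one GPU-composed batch, everything else gets MDP pipes.
    static std::pair<int, int> maxCachedBatch(const std::vector<bool>& isCached) {
        int bestStart = -1, bestCount = 0;
        for (size_t i = 0; i < isCached.size();) {
            if (!isCached[i]) { ++i; continue; }
            size_t start = i;
            while (i < isCached.size() && isCached[i]) ++i;
            if (int(i - start) > bestCount) {
                bestStart = int(start);
                bestCount = int(i - start);
            }
        }
        return std::make_pair(bestStart, bestCount); // {-1, 0} if none cached
    }

In the real batchLayers() below, pulling a layer out of the cached
batch is additionally rejected when MDP cannot support that layer.
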
Change-Id: I7eb43ba817f1081f317fefa99fa0c507344060e8
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 44db2bc..49b931e 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -62,11 +62,8 @@
dumpsys_log(buf,"HWC Map for Dpy: %s \n",
(mDpy == 0) ? "\"PRIMARY\"" :
(mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
- dumpsys_log(buf,"PREV_FRAME: layerCount:%2d mdpCount:%2d \
- cacheCount:%2d \n", mCachedFrame.layerCount,
- mCachedFrame.mdpCount, mCachedFrame.cacheCount);
- dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d \
- fbCount:%2d \n", mCurrentFrame.layerCount,
+ dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
+ "fbCount:%2d \n", mCurrentFrame.layerCount,
mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
(mCurrentFrame.needsRedraw? "YES" : "NO"),
@@ -234,7 +231,7 @@
void MDPComp::LayerCache::reset() {
memset(&hnd, 0, sizeof(hnd));
mdpCount = 0;
- cacheCount = 0;
+ fbCount = 0;
layerCount = 0;
fbZ = -1;
}
@@ -248,11 +245,22 @@
void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
mdpCount = curFrame.mdpCount;
- cacheCount = curFrame.fbCount;
+ fbCount = curFrame.fbCount;
layerCount = curFrame.layerCount;
fbZ = curFrame.fbZ;
}
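+
+/* Layers that MDP h/w cannot handle, e.g. 90-degree transforms on
+ * non-YUV buffers or invalid dimensions, must take the GPU route. */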
+bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if((not isYuvBuffer(hnd) and has90Transform(layer)) or
+ (not isValidDimension(ctx,layer))
+            //More conditions can be added here: SKIP, sRGB+Blend, etc.
+ ) {
+ return false;
+ }
+ return true;
+}
+
bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
const int dpy = HWC_DISPLAY_PRIMARY;
private_handle_t *hnd = (private_handle_t *)layer->handle;
@@ -380,12 +388,6 @@
return false;
}
- if(mDpy > HWC_DISPLAY_PRIMARY){
- ALOGD_IF(isDebug(), "%s: Cannot support External display(s)",
- __FUNCTION__);
- return false;
- }
-
if(isSkipPresent(ctx, mDpy)) {
ALOGD_IF(isDebug(),"%s: SKIP present: %d",
__FUNCTION__,
@@ -399,27 +401,17 @@
return false;
}
- //MDP composition is not efficient if layer needs rotator.
for(int i = 0; i < numAppLayers; ++i) {
- // As MDP h/w supports flip operation, use MDP comp only for
- // 180 transforms. Fail for any transform involving 90 (90, 270).
hwc_layer_1_t* layer = &list->hwLayers[i];
private_handle_t *hnd = (private_handle_t *)layer->handle;
- if(layer->transform & HWC_TRANSFORM_ROT_90) {
- if(!isYuvBuffer(hnd) ) {
- ALOGD_IF(isDebug(), "%s: orientation involved",__FUNCTION__);
- return false;
- }else if(!canUseRotator(ctx, mDpy)) {
- ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
+ if(isYuvBuffer(hnd) && has90Transform(layer)) {
+ if(!canUseRotator(ctx, mDpy)) {
+ ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
+ __FUNCTION__, mDpy);
return false;
}
}
- if(!isValidDimension(ctx,layer)) {
- ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
- __FUNCTION__);
- return false;
- }
//For 8x26 with panel width>1k, if RGB layer needs HFLIP fail mdp comp
// may not need it if Gfx pre-rotation can handle all flips & rotations
@@ -445,6 +437,22 @@
}
bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
+    //Will benefit presentation / secondary-only layers.
+ if((mDpy > HWC_DISPLAY_PRIMARY) &&
+ (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
+ ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
+ return false;
+ }
+
+ const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ for(int i = 0; i < numAppLayers; i++) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
+ return false;
+ }
+ }
+
//Setup mCurrentFrame
mCurrentFrame.mdpCount = mCurrentFrame.layerCount;
mCurrentFrame.fbCount = 0;
@@ -482,10 +490,35 @@
//Setup mCurrentFrame
mCurrentFrame.reset(numAppLayers);
updateLayerCache(ctx, list);
+
+    //If an MDP-marked layer is unsupported, we cannot do partial MDP comp
+ for(int i = 0; i < numAppLayers; i++) {
+ if(!mCurrentFrame.isFBComposed[i]) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
+ __FUNCTION__);
+ return false;
+ }
+ }
+ }
+
updateYUV(ctx, list, false /*secure only*/);
- batchLayers(); //sets up fbZ also
+ bool ret = batchLayers(ctx, list); //sets up fbZ also
+ if(!ret) {
+ ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
+ return false;
+ }
int mdpCount = mCurrentFrame.mdpCount;
+
+    //Will benefit cases where a video has a non-updating background.
+ if((mDpy > HWC_DISPLAY_PRIMARY) and
+ (mdpCount > MAX_SEC_LAYERS)) {
+ ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
+ return false;
+ }
+
if(mdpCount > (sMaxPipesPerMixer - 1)) { // -1 since FB is used
ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
return false;
@@ -572,7 +605,7 @@
return true;
}
-void MDPComp::batchLayers() {
+bool MDPComp::batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
/* Idea is to keep as many contiguous non-updating(cached) layers in FB and
* send rest of them through MDP. NEVER mark an updating layer for caching.
* But cached ones can be marked for MDP*/
@@ -583,11 +616,11 @@
/* All or Nothing is cached. No batching needed */
if(!mCurrentFrame.fbCount) {
mCurrentFrame.fbZ = -1;
- return;
+ return true;
}
if(!mCurrentFrame.mdpCount) {
mCurrentFrame.fbZ = 0;
- return;
+ return true;
}
/* Search for max number of contiguous (cached) layers */
@@ -607,7 +640,13 @@
/* reset rest of the layers for MDP comp */
for(int i = 0; i < mCurrentFrame.layerCount; i++) {
- if(i != maxBatchStart){
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(i != maxBatchStart) {
+            //If an unsupported layer would be pulled out of the FB
+            //batch, fail so the whole frame falls back to GPU
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ return false;
+ }
mCurrentFrame.isFBComposed[i] = false;
} else {
i += maxBatchCount;
@@ -620,17 +659,19 @@
ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
mCurrentFrame.fbCount);
+
+ return true;
}
void MDPComp::updateLayerCache(hwc_context_t* ctx,
- hwc_display_contents_1_t* list) {
-
+ hwc_display_contents_1_t* list) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
- int numCacheableLayers = 0;
+ int fbCount = 0;
for(int i = 0; i < numAppLayers; i++) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
- numCacheableLayers++;
+ fbCount++;
mCurrentFrame.isFBComposed[i] = true;
} else {
mCurrentFrame.isFBComposed[i] = false;
@@ -638,10 +679,11 @@
}
}
- mCurrentFrame.fbCount = numCacheableLayers;
- mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
- mCurrentFrame.fbCount;
- ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, numCacheableLayers);
+ mCurrentFrame.fbCount = fbCount;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount;
+
+    ALOGD_IF(isDebug(),"%s: MDP count: %d FB count: %d",__FUNCTION__,
+ mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
}
void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
@@ -836,12 +878,11 @@
mCurrentFrame.needsRedraw = false;
if(mCurrentFrame.fbCount &&
((mCurrentFrame.mdpCount != mCachedFrame.mdpCount) ||
- (mCurrentFrame.fbCount != mCachedFrame.cacheCount) ||
+ (mCurrentFrame.fbCount != mCachedFrame.fbCount) ||
(mCurrentFrame.fbZ != mCachedFrame.fbZ) ||
(!mCurrentFrame.mdpCount) ||
(list->flags & HWC_GEOMETRY_CHANGED) ||
- isSkipPresent(ctx, mDpy) ||
- (mDpy > HWC_DISPLAY_PRIMARY))) {
+ isSkipPresent(ctx, mDpy))) {
mCurrentFrame.needsRedraw = true;
}
}
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index 7fb0968..7aa75a2 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -55,6 +55,8 @@
static void reset() { sCompBytesClaimed = 0; };
protected:
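+    //Secondary (external/virtual) displays are limited to one MDP layer
+    //for now.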
+ enum { MAX_SEC_LAYERS = 1 }; //TODO add property support
+
enum ePipeType {
MDPCOMP_OV_RGB = ovutils::OV_MDP_PIPE_RGB,
MDPCOMP_OV_VG = ovutils::OV_MDP_PIPE_VG,
@@ -103,7 +105,7 @@
struct LayerCache {
int layerCount;
int mdpCount;
- int cacheCount;
+ int fbCount;
int fbZ;
buffer_handle_t hnd[MAX_NUM_APP_LAYERS];
@@ -158,13 +160,14 @@
/* tracks non updating layers*/
void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
/* optimize layers for mdp comp*/
- void batchLayers();
+ bool batchLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list);
/* updates cache map with YUV info */
void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
bool secureOnly);
bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);
bool programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list);
void reset(const int& numAppLayers, hwc_display_contents_1_t* list);
+ bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
int mDpy;
static bool sEnabled;
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 7668b5f..e616e42 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -405,6 +405,11 @@
static inline bool isYuvPresent (hwc_context_t *ctx, int dpy) {
return ctx->listStats[dpy].yuvCount;
}
+
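+//True for transforms involving a 90-degree rotation; the HAL encodes
+//both 90 and 270 with the HWC_TRANSFORM_ROT_90 bit set.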
+static inline bool has90Transform(hwc_layer_1_t *layer) {
+ return (layer->transform & HWC_TRANSFORM_ROT_90);
+}
+
};
#endif //HWC_UTILS_H
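
For context, a simplified sketch of how the two strategies this change
touches would compose (tryComposition() is a hypothetical wrapper for
illustration, and partialMDPComp() is assumed to be the counterpart of
fullMDPComp() above; this is not the verbatim prepare() flow):

    //Try full MDP composition first, fall back to partial composition
    //(non-updating layers cached on the FB), else pure GPU.
    bool MDPComp::tryComposition(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) {
        if(fullMDPComp(ctx, list))
            return true;  //every app layer gets an MDP pipe
        if(partialMDPComp(ctx, list))
            return true;  //cached batch on FB, updating layers on MDP
        return false;     //caller marks the whole list for GPU
    }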