Merge "hwc/overlay: Set MDP_SOLID_FILL flag for Color layer."
diff --git a/libgralloc/gpu.cpp b/libgralloc/gpu.cpp
index f15a973..b4da363 100644
--- a/libgralloc/gpu.cpp
+++ b/libgralloc/gpu.cpp
@@ -277,6 +277,13 @@
grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; //NV21
}
+ if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+ (usage & GRALLOC_USAGE_HW_COMPOSER )) {
+ //XXX: If we still haven't set a format, default to
+ //RGBA8888
+ grallocFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+ }
+
getGrallocInformationFromFormat(grallocFormat, &bufferType);
size = getBufferSizeAndDimensions(w, h, grallocFormat, alignedw, alignedh);
diff --git a/libhwcomposer/hwc.cpp b/libhwcomposer/hwc.cpp
index 22e69f3..eb999f7 100644
--- a/libhwcomposer/hwc.cpp
+++ b/libhwcomposer/hwc.cpp
@@ -486,7 +486,7 @@
//TODO We dont check for SKIP flag on this layer because we need PAN
//always. Last layer is always FB
private_handle_t *hnd = (private_handle_t *)fbLayer->handle;
- if(copybitDone) {
+ if(copybitDone && ctx->mMDP.version >= qdutils::MDP_V4_0) {
hnd = ctx->mCopyBit[dpy]->getCurrentRenderBuffer();
}
diff --git a/libhwcomposer/hwc_copybit.cpp b/libhwcomposer/hwc_copybit.cpp
index 4695a4f..47b9225 100644
--- a/libhwcomposer/hwc_copybit.cpp
+++ b/libhwcomposer/hwc_copybit.cpp
@@ -199,7 +199,8 @@
}
//Allocate render buffers if they're not allocated
- if (useCopybitForYUV || useCopybitForRGB) {
+ if (ctx->mMDP.version != qdutils::MDP_V3_0_4 &&
+ (useCopybitForYUV || useCopybitForRGB)) {
int ret = allocRenderBuffers(mAlignedFBWidth,
mAlignedFBHeight,
HAL_PIXEL_FORMAT_RGBA_8888);
@@ -223,7 +224,10 @@
// Mark all layers to be drawn by copybit
for (int i = ctx->listStats[dpy].numAppLayers-1; i >= 0 ; i--) {
layerProp[i].mFlags |= HWC_COPYBIT;
- list->hwLayers[i].compositionType = HWC_OVERLAY;
+ if (ctx->mMDP.version == qdutils::MDP_V3_0_4)
+ list->hwLayers[i].compositionType = HWC_BLIT;
+ else
+ list->hwLayers[i].compositionType = HWC_OVERLAY;
}
}
@@ -254,13 +258,20 @@
// draw layers marked for COPYBIT
int retVal = true;
int copybitLayerCount = 0;
+ uint32_t last = 0;
LayerProp *layerProp = ctx->layerProp[dpy];
+ private_handle_t *renderBuffer;
if(mCopyBitDraw == false) // there is no layer marked for copybit
return false ;
//render buffer
- private_handle_t *renderBuffer = getCurrentRenderBuffer();
+ if (ctx->mMDP.version == qdutils::MDP_V3_0_4) {
+ last = list->numHwLayers - 1;
+ renderBuffer = (private_handle_t *)list->hwLayers[last].handle;
+ } else {
+ renderBuffer = getCurrentRenderBuffer();
+ }
if (!renderBuffer) {
ALOGE("%s: Render buffer layer handle is NULL", __FUNCTION__);
return false;
@@ -274,9 +285,9 @@
mRelFd[mCurRenderBufferIndex] = -1;
}
} else {
- if(mRelFd[mCurRenderBufferIndex] >=0) {
+ if(list->hwLayers[last].acquireFenceFd >=0) {
copybit_device_t *copybit = getCopyBitDevice();
- copybit->set_sync(copybit, mRelFd[mCurRenderBufferIndex]);
+ copybit->set_sync(copybit, list->hwLayers[last].acquireFenceFd);
}
}
@@ -316,10 +327,10 @@
copybit_device_t *copybit = getCopyBitDevice();
// Async mode
copybit->flush_get_fence(copybit, fd);
- if(mRelFd[mCurRenderBufferIndex] >=0 &&
- ctx->mMDP.version == qdutils::MDP_V3_0_4) {
- close(mRelFd[mCurRenderBufferIndex]);
- mRelFd[mCurRenderBufferIndex] = -1;
+ if(ctx->mMDP.version == qdutils::MDP_V3_0_4 &&
+ list->hwLayers[last].acquireFenceFd >= 0) {
+ close(list->hwLayers[last].acquireFenceFd);
+ list->hwLayers[last].acquireFenceFd = -1;
}
}
return true;
diff --git a/libhwcomposer/hwc_fbupdate.cpp b/libhwcomposer/hwc_fbupdate.cpp
index 0ca5ad9..d601f8f 100644
--- a/libhwcomposer/hwc_fbupdate.cpp
+++ b/libhwcomposer/hwc_fbupdate.cpp
@@ -176,6 +176,8 @@
}
calcExtDisplayPosition(ctx, NULL, mDpy, sourceCrop, displayFrame,
transform, orient);
+ //Store the displayFrame, will be used in getDisplayViewFrame
+ ctx->dpyAttr[mDpy].mDstRect = displayFrame;
setMdpFlags(layer, mdpFlags, 0, transform);
// For External use rotator if there is a rotation value set
ret = preRotateExtDisplay(ctx, layer, info,
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 63b4aa5..8048d71 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -157,10 +157,11 @@
return true;
}
-void MDPComp::reset(const int& numLayers, hwc_display_contents_1_t* list) {
+void MDPComp::reset(hwc_context_t *ctx) {
+ const int numLayers = ctx->listStats[mDpy].numAppLayers;
mCurrentFrame.reset(numLayers);
- mCachedFrame.cacheAll(list);
- mCachedFrame.updateCounts(mCurrentFrame);
+ ctx->mOverlay->clear(mDpy);
+ ctx->mLayerRotMap[mDpy]->clear();
}
void MDPComp::timeout_handler(void *udata) {
@@ -200,6 +201,16 @@
}
}
+void MDPComp::setRedraw(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ mCurrentFrame.needsRedraw = false;
+ if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
+ (list->flags & HWC_GEOMETRY_CHANGED) ||
+ isSkipPresent(ctx, mDpy)) {
+ mCurrentFrame.needsRedraw = true;
+ }
+}
+
MDPComp::FrameInfo::FrameInfo() {
reset(0);
}
@@ -513,7 +524,7 @@
/* Checks for conditions where all the layers marked for MDP comp cannot be
* bypassed. On such conditions we try to bypass atleast YUV layers */
-bool MDPComp::isFullFrameDoable(hwc_context_t *ctx,
+bool MDPComp::tryFullFrame(hwc_context_t *ctx,
hwc_display_contents_1_t* list){
const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
@@ -563,11 +574,6 @@
ret = true;
}
- if(!hwLimitationsCheck(ctx, list)) {
- ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
- return false;
- }
-
return ret;
}
@@ -596,6 +602,7 @@
qhwc::needsScaling(layer))
return false;
}
+
mCurrentFrame.fbCount = 0;
mCurrentFrame.fbZ = -1;
memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
@@ -607,8 +614,9 @@
adjustForSourceSplit(ctx, list);
}
- if(!resourceCheck(ctx, list)) {
- ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
+ if(!postHeuristicsHandling(ctx, list)) {
+ ALOGD_IF(isDebug(), "post heuristic handling failed");
+ reset(ctx);
return false;
}
@@ -649,6 +657,7 @@
if(not isSupportedForMDPComp(ctx, layer)) {
ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
__FUNCTION__);
+ reset(ctx);
return false;
}
}
@@ -658,6 +667,7 @@
bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
if(!ret) {
ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
+ reset(ctx);
return false;
}
@@ -671,11 +681,13 @@
if((mDpy > HWC_DISPLAY_PRIMARY) and
(mdpCount > MAX_SEC_LAYERS)) {
ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
+ reset(ctx);
return false;
}
- if(!resourceCheck(ctx, list)) {
- ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
+ if(!postHeuristicsHandling(ctx, list)) {
+ ALOGD_IF(isDebug(), "post heuristic handling failed");
+ reset(ctx);
return false;
}
@@ -731,6 +743,7 @@
if(not isSupportedForMDPComp(ctx, layer)) {
ALOGD_IF(isDebug(), "%s: MDP unsupported layer found at %d",
__FUNCTION__, i);
+ reset(ctx);
return false;
}
mCurrentFrame.isFBComposed[i] = false;
@@ -741,17 +754,19 @@
mCurrentFrame.fbCount = batchSize;
mCurrentFrame.mdpCount = mCurrentFrame.layerCount - batchSize;
+ ALOGD_IF(isDebug(), "%s: fbZ %d batchSize %d",
+ __FUNCTION__, mCurrentFrame.fbZ, batchSize);
+
if(sEnable4k2kYUVSplit){
adjustForSourceSplit(ctx, list);
}
- if(!resourceCheck(ctx, list)) {
- ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
+ if(!postHeuristicsHandling(ctx, list)) {
+ ALOGD_IF(isDebug(), "post heuristic handling failed");
+ reset(ctx);
return false;
}
- ALOGD_IF(isDebug(), "%s: fbZ %d batchSize %d",
- __FUNCTION__, mCurrentFrame.fbZ, batchSize);
return true;
}
@@ -792,6 +807,7 @@
if(not isSupportedForMDPComp(ctx, layer)) {
ALOGD_IF(isDebug(), "%s: MDP unsupported layer found at %d",
__FUNCTION__, i);
+ reset(ctx);
return false;
}
mCurrentFrame.isFBComposed[i] = false;
@@ -801,19 +817,20 @@
mCurrentFrame.fbCount = fbBatchSize;
mCurrentFrame.mdpCount = mCurrentFrame.layerCount - fbBatchSize;
+ ALOGD_IF(isDebug(), "%s: FB Z %d, num app layers %d, MDP Batch Size %d",
+ __FUNCTION__, mCurrentFrame.fbZ, numAppLayers,
+ numAppLayers - fbBatchSize);
+
if(sEnable4k2kYUVSplit){
adjustForSourceSplit(ctx, list);
}
- if(!resourceCheck(ctx, list)) {
- ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
+ if(!postHeuristicsHandling(ctx, list)) {
+ ALOGD_IF(isDebug(), "post heuristic handling failed");
+ reset(ctx);
return false;
}
- ALOGD_IF(isDebug(), "%s: FB Z %d, num app layers %d, MDP Batch Size %d",
- __FUNCTION__, mCurrentFrame.fbZ, numAppLayers,
- numAppLayers - fbBatchSize);
-
return true;
}
@@ -825,7 +842,14 @@
return true;
}
-bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
+bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ const bool secureOnly = true;
+ return videoOnlyComp(ctx, list, not secureOnly) or
+ videoOnlyComp(ctx, list, secureOnly);
+}
+
+bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
hwc_display_contents_1_t* list, bool secureOnly) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
@@ -833,21 +857,28 @@
updateYUV(ctx, list, secureOnly);
int mdpCount = mCurrentFrame.mdpCount;
- if(!isYuvPresent(ctx, mDpy)) {
+ if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
+ reset(ctx);
return false;
}
/* Bail out if we are processing only secured video layers
* and we dont have any */
if(!isSecurePresent(ctx, mDpy) && secureOnly){
+ reset(ctx);
return false;
}
- if(!mdpCount)
- return false;
+ if(mCurrentFrame.fbCount)
+ mCurrentFrame.fbZ = mCurrentFrame.mdpCount;
- if(!resourceCheck(ctx, list)) {
- ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
+ if(sEnable4k2kYUVSplit){
+ adjustForSourceSplit(ctx, list);
+ }
+
+ if(!postHeuristicsHandling(ctx, list)) {
+ ALOGD_IF(isDebug(), "post heuristic handling failed");
+ reset(ctx);
return false;
}
@@ -1102,7 +1133,21 @@
mCurrentFrame.fbCount);
}
-bool MDPComp::programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
+bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+
+ //Capability checks
+ if(!resourceCheck(ctx, list)) {
+ ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
+ return false;
+ }
+
+ //Limitations checks
+ if(!hwLimitationsCheck(ctx, list)) {
+ ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
+ return false;
+ }
+
//Configure framebuffer first if applicable
if(mCurrentFrame.fbZ >= 0) {
if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, mCurrentFrame.fbZ)) {
@@ -1112,6 +1157,8 @@
}
}
+ mCurrentFrame.map();
+
if(!allocLayerPipes(ctx, list)) {
ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
return false;
@@ -1152,6 +1199,7 @@
}
}
+ setRedraw(ctx, list);
return true;
}
@@ -1276,21 +1324,20 @@
const int numLayers = ctx->listStats[mDpy].numAppLayers;
MDPVersion& mdpVersion = qdutils::MDPVersion::getInstance();
+ //If the number of app layers exceeds MAX_NUM_APP_LAYERS, fall back to GPU
+ //and do not cache the information for the next draw cycle.
+ if(numLayers > MAX_NUM_APP_LAYERS) {
+ ALOGI("%s: Number of App layers exceeded the limit ",
+ __FUNCTION__);
+ mCachedFrame.reset();
+ return -1;
+ }
+
//reset old data
mCurrentFrame.reset(numLayers);
memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
mCurrentFrame.dropCount = 0;
- //number of app layers exceeds MAX_NUM_APP_LAYERS fall back to GPU
- //do not cache the information for next draw cycle.
- if(numLayers > MAX_NUM_APP_LAYERS) {
- mCachedFrame.updateCounts(mCurrentFrame);
- ALOGI("%s: Number of App layers exceeded the limit ",
- __FUNCTION__);
- ret = -1;
- return ret;
- }
-
// Detect the start of animation and fall back to GPU only once to cache
// all the layers in FB and display FB content untill animation completes.
if(ctx->listStats[mDpy].isDisplayAnimating) {
@@ -1308,86 +1355,40 @@
}
//Hard conditions, if not met, cannot do MDP comp
- if(!isFrameDoable(ctx)) {
- ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
- __FUNCTION__);
- reset(numLayers, list);
- ret = -1;
- goto exit;
- }
+ if(isFrameDoable(ctx)) {
+ generateROI(ctx, list);
- generateROI(ctx, list);
-
- //Convert from kbps to gbps
- sMaxBw = mdpVersion.getHighBw() / 1000000.0;
- if (ctx->mExtDisplay->isConnected() || ctx->mMDP.panel != MIPI_CMD_PANEL) {
- sMaxBw = mdpVersion.getLowBw() / 1000000.0;
- }
-
- //Check whether layers marked for MDP Composition is actually doable.
- if(isFullFrameDoable(ctx, list)) {
- mCurrentFrame.map();
- //Acquire and Program MDP pipes
- if(!programMDP(ctx, list)) {
- reset(numLayers, list);
- ctx->mOverlay->clear(mDpy);
- ctx->mLayerRotMap[mDpy]->clear();
- ret = -1;
- goto exit;
- } else { //Success
- //Any change in composition types needs an FB refresh
- mCurrentFrame.needsRedraw = false;
- if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
- (list->flags & HWC_GEOMETRY_CHANGED) ||
- isSkipPresent(ctx, mDpy)) {
- mCurrentFrame.needsRedraw = true;
- }
- }
- } else if(isOnlyVideoDoable(ctx, list, false /*secure only*/) ||
- isOnlyVideoDoable(ctx, list, true /*secure only*/)) {
- //All layers marked for MDP comp cannot be bypassed.
- //Try to compose atleast YUV layers through MDP comp and let
- //all the RGB layers compose in FB
- //Destination over
-
- mCurrentFrame.fbZ = -1;
- if(mCurrentFrame.fbCount)
- mCurrentFrame.fbZ = mCurrentFrame.mdpCount;
-
- if(sEnable4k2kYUVSplit){
- adjustForSourceSplit(ctx, list);
+ //Convert from kbps to gbps
+ sMaxBw = mdpVersion.getHighBw() / 1000000.0;
+ if (ctx->mExtDisplay->isConnected() ||
+ ctx->mMDP.panel != MIPI_CMD_PANEL) {
+ sMaxBw = mdpVersion.getLowBw() / 1000000.0;
}
- mCurrentFrame.map();
-
- if(!programMDP(ctx, list)) {
- reset(numLayers, list);
- ctx->mOverlay->clear(mDpy);
- ctx->mLayerRotMap[mDpy]->clear();
+ if(tryFullFrame(ctx, list) || tryVideoOnly(ctx, list)) {
+ setMDPCompLayerFlags(ctx, list);
+ } else {
+ reset(ctx);
+ memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
+ mCurrentFrame.dropCount = 0;
ret = -1;
- goto exit;
}
} else {
- reset(numLayers, list);
- memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
- mCurrentFrame.dropCount = 0;
+ ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
+ __FUNCTION__);
ret = -1;
- goto exit;
}
- //UpdateLayerFlags
- setMDPCompLayerFlags(ctx, list);
- mCachedFrame.cacheAll(list);
- mCachedFrame.updateCounts(mCurrentFrame);
- // unlock it before calling dump function to avoid deadlock
if(isDebug()) {
- ALOGD("GEOMETRY change: %d", (list->flags & HWC_GEOMETRY_CHANGED));
+ ALOGD("GEOMETRY change: %d",
+ (list->flags & HWC_GEOMETRY_CHANGED));
android::String8 sDump("");
dump(sDump);
- ALOGE("%s",sDump.string());
+ ALOGD("%s",sDump.string());
}
-exit:
+ mCachedFrame.cacheAll(list);
+ mCachedFrame.updateCounts(mCurrentFrame);
double panelRefRate =
1000000000.0 / ctx->dpyAttr[mDpy].vsync_period;
sBwClaimed += calcMDPBytesRead(ctx, list) * panelRefRate;
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index 7e52679..05a560b 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -149,10 +149,12 @@
/* set/reset flags for MDPComp */
void setMDPCompLayerFlags(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
+ void setRedraw(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
/* checks for conditions where mdpcomp is not possible */
bool isFrameDoable(hwc_context_t *ctx);
/* checks for conditions where RGB layers cannot be bypassed */
- bool isFullFrameDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ bool tryFullFrame(hwc_context_t *ctx, hwc_display_contents_1_t* list);
/* checks if full MDP comp can be done */
bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
/* check if we can use layer cache to do at least partial MDP comp */
@@ -173,7 +175,8 @@
bool isLoadBasedCompDoable(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
/* checks for conditions where only video can be bypassed */
- bool isOnlyVideoDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list,
+ bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
bool secureOnly);
/* checks for conditions where YUV layers cannot be bypassed */
bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
@@ -212,8 +215,16 @@
/* updates cache map with YUV info */
void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
bool secureOnly);
- bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);
- void reset(const int& numAppLayers, hwc_display_contents_1_t* list);
+ /* Validates if the GPU/MDP layer split chosen by a strategy is supported
+ * by MDP.
+ * Sets up MDP comp data structures to reflect conversion from layers to
+ * overlay pipes.
+ * Configures overlay.
+ * Configures if GPU should redraw.
+ */
+ bool postHeuristicsHandling(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
+ void reset(hwc_context_t *ctx);
bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
bool resourceCheck(hwc_context_t *ctx, hwc_display_contents_1_t *list);
diff --git a/libhwcomposer/hwc_qclient.cpp b/libhwcomposer/hwc_qclient.cpp
index a3f6b5b..50e94c9 100644
--- a/libhwcomposer/hwc_qclient.cpp
+++ b/libhwcomposer/hwc_qclient.cpp
@@ -140,8 +140,34 @@
ctx->mBufferMirrorMode = enable;
}
+static status_t getDisplayVisibleRegion(hwc_context_t* ctx, int dpy,
+ Parcel* outParcel) {
+ // Get the info only if the dpy is valid
+ if(dpy >= HWC_DISPLAY_PRIMARY && dpy <= HWC_DISPLAY_VIRTUAL) {
+ Locker::Autolock _sl(ctx->mDrawLock);
+ if(dpy && (ctx->mExtOrientation || ctx->mBufferMirrorMode)) {
+ // Return the destRect on external, if external orientation
+ // is enabled
+ outParcel->writeInt32(ctx->dpyAttr[dpy].mDstRect.left);
+ outParcel->writeInt32(ctx->dpyAttr[dpy].mDstRect.top);
+ outParcel->writeInt32(ctx->dpyAttr[dpy].mDstRect.right);
+ outParcel->writeInt32(ctx->dpyAttr[dpy].mDstRect.bottom);
+ } else {
+ outParcel->writeInt32(ctx->mViewFrame[dpy].left);
+ outParcel->writeInt32(ctx->mViewFrame[dpy].top);
+ outParcel->writeInt32(ctx->mViewFrame[dpy].right);
+ outParcel->writeInt32(ctx->mViewFrame[dpy].bottom);
+ }
+ return NO_ERROR;
+ } else {
+ ALOGE("In %s: invalid dpy index %d", __FUNCTION__, dpy);
+ return BAD_VALUE;
+ }
+}
+
status_t QClient::notifyCallback(uint32_t command, const Parcel* inParcel,
Parcel* outParcel) {
+ status_t ret = NO_ERROR;
if (command > IQService::VPU_COMMAND_LIST_START &&
command < IQService::VPU_COMMAND_LIST_END) {
@@ -164,6 +190,10 @@
case IQService::BUFFER_MIRRORMODE:
setBufferMirrorMode(mHwcContext, inParcel->readInt32());
break;
+ case IQService::GET_DISPLAY_VISIBLE_REGION:
+ ret = getDisplayVisibleRegion(mHwcContext, inParcel->readInt32(),
+ outParcel);
+ break;
case IQService::CHECK_EXTERNAL_STATUS:
isExternalConnected(mHwcContext, outParcel);
break;
@@ -174,9 +204,9 @@
setHSIC(mHwcContext, inParcel);
break;
default:
- return NO_ERROR;
+ ret = NO_ERROR;
}
- return NO_ERROR;
+ return ret;
}
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index ff21659..0663f67 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -1188,7 +1188,8 @@
//Accumulate acquireFenceFds for MDP
for(uint32_t i = 0; i < list->numHwLayers; i++) {
- if(list->hwLayers[i].compositionType == HWC_OVERLAY &&
+ if((list->hwLayers[i].compositionType == HWC_OVERLAY ||
+ list->hwLayers[i].compositionType == HWC_BLIT) &&
list->hwLayers[i].acquireFenceFd >= 0) {
if(UNLIKELY(swapzero))
acquireFd[count++] = -1;
@@ -1230,6 +1231,7 @@
for(uint32_t i = 0; i < list->numHwLayers; i++) {
if(list->hwLayers[i].compositionType == HWC_OVERLAY ||
+ list->hwLayers[i].compositionType == HWC_BLIT ||
list->hwLayers[i].compositionType == HWC_FRAMEBUFFER_TARGET) {
//Populate releaseFenceFds.
if(UNLIKELY(swapzero)) {
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 872b306..cd84f73 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -87,6 +87,8 @@
bool isConfiguring;
// External Display is in MDP Downscale mode indicator
bool mDownScaleMode;
+ // Ext dst Rect
+ hwc_rect_t mDstRect;
};
struct ListStats {
diff --git a/libmemtrack/kgsl.c b/libmemtrack/kgsl.c
index 4843742..6dd4e27 100644
--- a/libmemtrack/kgsl.c
+++ b/libmemtrack/kgsl.c
@@ -47,7 +47,7 @@
size_t allocated_records = min(*num_records, ARRAY_SIZE(record_templates));
int i;
FILE *fp;
- FILE *smaps_fp;
+ FILE *smaps_fp = NULL;
char line[1024];
char tmp[128];
size_t accounted_size = 0;
@@ -74,6 +74,7 @@
snprintf(tmp, sizeof(tmp), "/proc/%d/smaps", pid);
smaps_fp = fopen(tmp, "r");
if (smaps_fp == NULL) {
+ fclose(fp);
return -errno;
}
}
@@ -145,6 +146,8 @@
records[1].size_in_bytes = unaccounted_size;
}
+ if (smaps_fp)
+ fclose(smaps_fp);
fclose(fp);
return 0;
diff --git a/libqdutils/display_config.cpp b/libqdutils/display_config.cpp
index eaf5384..45b0211 100644
--- a/libqdutils/display_config.cpp
+++ b/libqdutils/display_config.cpp
@@ -91,4 +91,26 @@
ALOGE("%s: Failed to get external status err=%d", __FUNCTION__, err);
return err;
}
+
+int getDisplayVisibleRegion(int dpy, hwc_rect_t &rect) {
+ status_t err = FAILED_TRANSACTION;
+ sp<IQService> binder = getBinder();
+ Parcel inParcel, outParcel;
+ inParcel.writeInt32(dpy);
+ if(binder != NULL) {
+ err = binder->dispatch(IQService::GET_DISPLAY_VISIBLE_REGION,
+ &inParcel, &outParcel);
+ }
+ if(!err) {
+ rect.left = outParcel.readInt32();
+ rect.top = outParcel.readInt32();
+ rect.right = outParcel.readInt32();
+ rect.bottom = outParcel.readInt32();
+ } else {
+ ALOGE("%s: Failed to getVisibleRegion for dpy =%d: err = %d",
+ __FUNCTION__, dpy, err);
+ }
+ return err;
+}
+
}; //namespace
diff --git a/libqdutils/display_config.h b/libqdutils/display_config.h
index c7d8ce9..29edbef 100644
--- a/libqdutils/display_config.h
+++ b/libqdutils/display_config.h
@@ -29,6 +29,7 @@
#include <gralloc_priv.h>
#include <qdMetaData.h>
#include <mdp_version.h>
+#include <hardware/hwcomposer.h>
// This header is for clients to use to set/get global display configuration
// The functions in this header run in the client process and wherever necessary
@@ -69,4 +70,7 @@
// Returns 0 on success, negative values on errors
int setHSIC(int dpy, const HSICData_t& hsic_data);
+// Get the active visible region for the display
+// Returns 0 on success, negative values on errors
+int getDisplayVisibleRegion(int dpy, hwc_rect_t &rect);
}; //namespace
diff --git a/libqservice/IQService.h b/libqservice/IQService.h
index d6e525a..f8e58ab 100644
--- a/libqservice/IQService.h
+++ b/libqservice/IQService.h
@@ -48,6 +48,7 @@
CHECK_EXTERNAL_STATUS, // Check status of external display
GET_DISPLAY_ATTRIBUTES, // Get display attributes
SET_HSIC_DATA, // Set HSIC on dspp
+ GET_DISPLAY_VISIBLE_REGION, // Get the visibleRegion for dpy
VPU_COMMAND_LIST_START = 100, //Reserved block for VPU commands
VPU_COMMAND_LIST_END = 200,
COMMAND_LIST_END = 400,