Merge "hwc: remove gpu fallback when there is rotation"
diff --git a/common.mk b/common.mk
index 4aa56b2..cfe84be 100644
--- a/common.mk
+++ b/common.mk
@@ -31,12 +31,12 @@
endif
ifeq ($(call is-board-platform-in-list, msm8974 msm8226 msm8610 apq8084 \
- mpq8092), true)
+ mpq8092 msm_bronze), true)
common_flags += -DVENUS_COLOR_FORMAT
common_flags += -DMDSS_TARGET
endif
-ifeq ($(call is-board-platform-in-list, mpq8092), true)
+ifeq ($(call is-board-platform-in-list, mpq8092 msm_bronze), true)
#XXX: Replace with check from MDP when available
common_flags += -DVPU_TARGET
endif
diff --git a/libcopybit/copybit.cpp b/libcopybit/copybit.cpp
index 65c78f7..3b7039c 100644
--- a/libcopybit/copybit.cpp
+++ b/libcopybit/copybit.cpp
@@ -126,9 +126,12 @@
switch (format) {
case HAL_PIXEL_FORMAT_RGB_565: return MDP_RGB_565;
case HAL_PIXEL_FORMAT_RGBX_8888: return MDP_RGBX_8888;
+ case HAL_PIXEL_FORMAT_BGRX_8888: return MDP_BGRX_8888;
case HAL_PIXEL_FORMAT_RGB_888: return MDP_RGB_888;
case HAL_PIXEL_FORMAT_RGBA_8888: return MDP_RGBA_8888;
case HAL_PIXEL_FORMAT_BGRA_8888: return MDP_BGRA_8888;
+ case HAL_PIXEL_FORMAT_YCrCb_422_I: return MDP_YCRYCB_H2V1;
+ case HAL_PIXEL_FORMAT_YCbCr_422_I: return MDP_YCBYCR_H2V1;
case HAL_PIXEL_FORMAT_YCrCb_422_SP: return MDP_Y_CRCB_H2V1;
case HAL_PIXEL_FORMAT_YCrCb_420_SP: return MDP_Y_CRCB_H2V2;
case HAL_PIXEL_FORMAT_YCbCr_422_SP: return MDP_Y_CBCR_H2V1;
@@ -229,7 +232,12 @@
/** copy the bits */
static int msm_copybit(struct copybit_context_t *dev, void const *list)
{
- int err = ioctl(dev->mFD, MSMFB_ASYNC_BLIT,
+ int err;
+ if (dev->relFence != -1) {
+ close(dev->relFence);
+ dev->relFence = -1;
+ }
+ err = ioctl(dev->mFD, MSMFB_ASYNC_BLIT,
(struct mdp_async_blit_req_list const*)list);
ALOGE_IF(err<0, "copyBits failed (%s)", strerror(errno));
if (err == 0) {
@@ -406,7 +414,6 @@
list->count = 0;
list->sync.acq_fen_fd_cnt = 0;
ctx->acqFence[list->sync.acq_fen_fd_cnt++] = acquireFenceFd;
- ctx->relFence = -1;
}
}
return 0;
@@ -513,9 +520,7 @@
if (++list->count == maxCount) {
status = msm_copybit(ctx, list);
- if (ctx->relFence != -1) {
- list->sync.acq_fen_fd_cnt = 0;
- }
+ list->sync.acq_fen_fd_cnt = 0;
list->count = 0;
}
}
@@ -523,9 +528,7 @@
//Before freeing the buffer we need buffer passed through blit call
if (list->count != 0) {
status = msm_copybit(ctx, list);
- if (ctx->relFence != -1) {
- list->sync.acq_fen_fd_cnt = 0;
- }
+ list->sync.acq_fen_fd_cnt = 0;
list->count = 0;
}
free_buffer(yv12_handle);
diff --git a/libexternal/external.cpp b/libexternal/external.cpp
index 89d63e9..045edd8 100644
--- a/libexternal/external.cpp
+++ b/libexternal/external.cpp
@@ -301,34 +301,37 @@
int hdmiEDIDFile = open(sysFsEDIDFilePath, O_RDONLY, 0);
int len = -1;
+ char edidStr[128] = {'\0'};
if (hdmiEDIDFile < 0) {
ALOGE("%s: edid_modes file '%s' not found",
__FUNCTION__, sysFsEDIDFilePath);
return false;
} else {
- len = read(hdmiEDIDFile, mEDIDs, sizeof(mEDIDs)-1);
+ len = read(hdmiEDIDFile, edidStr, sizeof(edidStr)-1);
ALOGD_IF(DEBUG, "%s: EDID string: %s length = %d",
- __FUNCTION__, mEDIDs, len);
+ __FUNCTION__, edidStr, len);
if ( len <= 0) {
ALOGE("%s: edid_modes file empty '%s'",
__FUNCTION__, sysFsEDIDFilePath);
+ edidStr[0] = '\0';
}
else {
- while (len > 1 && isspace(mEDIDs[len-1]))
+ while (len > 1 && isspace(edidStr[len-1])) {
--len;
- mEDIDs[len] = 0;
+ }
+ edidStr[len] = '\0';
}
}
close(hdmiEDIDFile);
if(len > 0) {
// Get EDID modes from the EDID strings
- mModeCount = parseResolution(mEDIDs, mEDIDModes);
+ mModeCount = parseResolution(edidStr, mEDIDModes);
ALOGD_IF(DEBUG, "%s: mModeCount = %d", __FUNCTION__,
mModeCount);
}
- return (strlen(mEDIDs) > 0);
+ return (len > 0);
}
bool ExternalDisplay::openFrameBuffer()
@@ -359,7 +362,6 @@
void ExternalDisplay::resetInfo()
{
memset(&mVInfo, 0, sizeof(mVInfo));
- memset(mEDIDs, 0, sizeof(mEDIDs));
memset(mEDIDModes, 0, sizeof(mEDIDModes));
mModeCount = 0;
mCurrentMode = -1;
@@ -586,7 +588,9 @@
// if primary resolution is more than the hdmi resolution
// configure dpy attr to primary resolution and set
// downscale mode
- if((priW * priH) > (width * height)) {
+    // Restrict this up to 1080p resolution max
+ if(((priW * priH) > (width * height)) &&
+ (priW <= qdutils::MAX_DISPLAY_DIM )) {
mHwcContext->dpyAttr[HWC_DISPLAY_EXTERNAL].xres = priW;
mHwcContext->dpyAttr[HWC_DISPLAY_EXTERNAL].yres = priH;
// HDMI is always in landscape, so always assign the higher
diff --git a/libexternal/external.h b/libexternal/external.h
index 1a3602d..646e7a6 100644
--- a/libexternal/external.h
+++ b/libexternal/external.h
@@ -75,7 +75,6 @@
int mFd;
int mFbNum;
int mCurrentMode;
- char mEDIDs[128];
int mEDIDModes[64];
int mModeCount;
bool mUnderscanSupported;
diff --git a/libgralloc/alloc_controller.cpp b/libgralloc/alloc_controller.cpp
index 89ac919..98047e4 100644
--- a/libgralloc/alloc_controller.cpp
+++ b/libgralloc/alloc_controller.cpp
@@ -152,6 +152,8 @@
case HAL_PIXEL_FORMAT_YV12:
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
case HAL_PIXEL_FORMAT_YCrCb_422_SP:
+ case HAL_PIXEL_FORMAT_YCbCr_422_I:
+ case HAL_PIXEL_FORMAT_YCrCb_422_I:
stride = ALIGN(width, 16);
break;
case HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
@@ -182,12 +184,6 @@
IonController::IonController()
{
mIonAlloc = new IonAlloc();
- mUseTZProtection = false;
- char property[PROPERTY_VALUE_MAX];
- if ((property_get("persist.gralloc.cp.level3", property, NULL) <= 0) ||
- (atoi(property) != 1)) {
- mUseTZProtection = true;
- }
}
int IonController::allocate(alloc_data& data, int usage)
@@ -208,7 +204,7 @@
ionFlags |= ION_HEAP(ION_IOMMU_HEAP_ID);
if(usage & GRALLOC_USAGE_PROTECTED) {
- if ((mUseTZProtection) && (usage & GRALLOC_USAGE_PRIVATE_MM_HEAP)) {
+ if (usage & GRALLOC_USAGE_PRIVATE_MM_HEAP) {
ionFlags |= ION_HEAP(ION_CP_MM_HEAP_ID);
ionFlags |= ION_SECURE;
} else {
@@ -324,6 +320,8 @@
break;
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
case HAL_PIXEL_FORMAT_YCrCb_422_SP:
+ case HAL_PIXEL_FORMAT_YCbCr_422_I:
+ case HAL_PIXEL_FORMAT_YCrCb_422_I:
if(width & 1) {
ALOGE("width is odd for the YUV422_SP format");
return -EINVAL;
diff --git a/libgralloc/alloc_controller.h b/libgralloc/alloc_controller.h
index 8954d39..5fe81fa 100644
--- a/libgralloc/alloc_controller.h
+++ b/libgralloc/alloc_controller.h
@@ -65,7 +65,6 @@
private:
IonAlloc* mIonAlloc;
- bool mUseTZProtection;
};
} //end namespace gralloc
diff --git a/libgralloc/gpu.cpp b/libgralloc/gpu.cpp
index ce15304..0136407 100644
--- a/libgralloc/gpu.cpp
+++ b/libgralloc/gpu.cpp
@@ -146,6 +146,10 @@
flags |= private_handle_t::PRIV_FLAGS_HW_TEXTURE;
}
+ if(usage & GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY) {
+ flags |= private_handle_t::PRIV_FLAGS_SECURE_DISPLAY;
+ }
+
flags |= data.allocType;
int eBaseAddr = int(eData.base) + eData.offset;
private_handle_t *hnd = new private_handle_t(data.fd, size, flags,
diff --git a/libgralloc/gralloc_priv.h b/libgralloc/gralloc_priv.h
index 3187648..03d15a7 100644
--- a/libgralloc/gralloc_priv.h
+++ b/libgralloc/gralloc_priv.h
@@ -70,6 +70,9 @@
/* CAMERA heap is a carveout heap for camera, is not secured*/
GRALLOC_USAGE_PRIVATE_CAMERA_HEAP = 0x00400000,
+
+    /* This flag is used for the SECURE display use case */
+ GRALLOC_USAGE_PRIVATE_SECURE_DISPLAY = 0x00800000,
};
enum {
@@ -100,6 +103,8 @@
HAL_PIXEL_FORMAT_RG_88 = 0x10E,
HAL_PIXEL_FORMAT_YCbCr_444_SP = 0x10F,
HAL_PIXEL_FORMAT_YCrCb_444_SP = 0x110,
+ HAL_PIXEL_FORMAT_YCrCb_422_I = 0x111,
+ HAL_PIXEL_FORMAT_BGRX_8888 = 0x112,
HAL_PIXEL_FORMAT_INTERLACE = 0x180,
};
@@ -161,6 +166,7 @@
PRIV_FLAGS_ITU_R_601 = 0x00200000,
PRIV_FLAGS_ITU_R_601_FR = 0x00400000,
PRIV_FLAGS_ITU_R_709 = 0x00800000,
+ PRIV_FLAGS_SECURE_DISPLAY = 0x01000000,
};
// file-descriptors
diff --git a/libhwcomposer/Android.mk b/libhwcomposer/Android.mk
index 12b822a..4ef4286 100644
--- a/libhwcomposer/Android.mk
+++ b/libhwcomposer/Android.mk
@@ -25,7 +25,7 @@
hwc_dump_layers.cpp \
hwc_ad.cpp
-ifeq ($(call is-board-platform-in-list, mpq8092), true)
+ifeq ($(call is-board-platform-in-list, mpq8092 msm_bronze), true)
LOCAL_SRC_FILES += hwc_vpuclient.cpp
endif
diff --git a/libhwcomposer/hwc.cpp b/libhwcomposer/hwc.cpp
index e970d4c..148a0fc 100644
--- a/libhwcomposer/hwc.cpp
+++ b/libhwcomposer/hwc.cpp
@@ -138,7 +138,9 @@
static void handleGeomChange(hwc_context_t *ctx, int dpy,
hwc_display_contents_1_t *list) {
- if(list->flags & HWC_GEOMETRY_CHANGED) {
+    /* No point in calling overlay_set on MDP3 */
+ if(list->flags & HWC_GEOMETRY_CHANGED &&
+ ctx->mMDP.version >= qdutils::MDP_V4_0) {
ctx->mOverlay->forceSet(dpy);
}
}
@@ -446,6 +448,9 @@
supported |= HWC_DISPLAY_EXTERNAL_BIT;
value[0] = supported;
break;
+ case HWC_FORMAT_RB_SWAP:
+ value[0] = 1;
+ break;
default:
return -EINVAL;
}
@@ -494,7 +499,8 @@
}
}
- if(!Overlay::displayCommit(ctx->dpyAttr[dpy].fd)) {
+ if(!Overlay::displayCommit(ctx->dpyAttr[dpy].fd,
+ ctx->listStats[dpy].roi)) {
ALOGE("%s: display commit fail for %d dpy!", __FUNCTION__, dpy);
ret = -1;
}
@@ -766,6 +772,9 @@
ovDump[0] = '\0';
ctx->mRotMgr->getDump(ovDump, 1024);
dumpsys_log(aBuf, ovDump);
+ ovDump[0] = '\0';
+ Writeback::getInstance()->getDump(ovDump, 1024);
+ dumpsys_log(aBuf, ovDump);
strlcpy(buff, aBuf.string(), buff_len);
}
diff --git a/libhwcomposer/hwc_copybit.cpp b/libhwcomposer/hwc_copybit.cpp
index 3348ae0..d54a5f2 100644
--- a/libhwcomposer/hwc_copybit.cpp
+++ b/libhwcomposer/hwc_copybit.cpp
@@ -240,8 +240,8 @@
rect.bottom};
copybit_image_t buf;
- buf.w = ALIGN(hnd->width,32);
- buf.h = hnd->height;
+ buf.w = ALIGN(getWidth(hnd),32);
+ buf.h = getHeight(hnd);
buf.format = hnd->format;
buf.base = (void *)hnd->base;
buf.handle = (native_handle_t *)hnd;
@@ -268,19 +268,24 @@
return false;
}
- //Wait for the previous frame to complete before rendering onto it
- if(mRelFd[0] >=0) {
- sync_wait(mRelFd[0], 1000);
- close(mRelFd[0]);
- mRelFd[0] = -1;
- }
-
if (ctx->mMDP.version >= qdutils::MDP_V4_0) {
+ //Wait for the previous frame to complete before rendering onto it
+ if(mRelFd[0] >=0) {
+ sync_wait(mRelFd[0], 1000);
+ close(mRelFd[0]);
+ mRelFd[0] = -1;
+ }
+
//Clear the visible region on the render buffer
//XXX: Do this only when needed.
hwc_rect_t clearRegion;
getNonWormholeRegion(list, clearRegion);
clear(renderBuffer, clearRegion);
+ } else {
+ if(mRelFd[0] >=0) {
+ copybit_device_t *copybit = getCopyBitDevice();
+ copybit->set_sync(copybit, mRelFd[0]);
+ }
}
// numAppLayers-1, as we iterate from 0th layer index with HWC_COPYBIT flag
for (int i = 0; i <= (ctx->listStats[dpy].numAppLayers-1); i++) {
@@ -313,6 +318,10 @@
copybit_device_t *copybit = getCopyBitDevice();
// Async mode
copybit->flush_get_fence(copybit, fd);
+ if(mRelFd[0] >=0 && ctx->mMDP.version == qdutils::MDP_V3_0_4) {
+ close(mRelFd[0]);
+ mRelFd[0] = -1;
+ }
}
return true;
}
@@ -341,12 +350,22 @@
// Set the copybit source:
copybit_image_t src;
- src.w = hnd->width;
- src.h = hnd->height;
+ src.w = getWidth(hnd);
+ src.h = getHeight(hnd);
src.format = hnd->format;
+
+ // Handle R/B swap
+ if ((layer->flags & HWC_FORMAT_RB_SWAP)) {
+ if (src.format == HAL_PIXEL_FORMAT_RGBA_8888) {
+ src.format = HAL_PIXEL_FORMAT_BGRA_8888;
+ } else if (src.format == HAL_PIXEL_FORMAT_RGBX_8888) {
+ src.format = HAL_PIXEL_FORMAT_BGRX_8888;
+ }
+ }
+
src.base = (void *)hnd->base;
src.handle = (native_handle_t *)layer->handle;
- src.horiz_padding = src.w - hnd->width;
+ src.horiz_padding = src.w - getWidth(hnd);
// Initialize vertical padding to zero for now,
// this needs to change to accomodate vertical stride
// if needed in the future
@@ -431,7 +450,7 @@
dtdy > scaleLimitMax ||
dsdx < 1/scaleLimitMin ||
dtdy < 1/scaleLimitMin) {
- ALOGE("%s: greater than max supported size dsdx=%f dtdy=%f \
+ ALOGW("%s: greater than max supported size dsdx=%f dtdy=%f \
scaleLimitMax=%f scaleLimitMin=%f", __FUNCTION__,dsdx,dtdy,
scaleLimitMax,1/scaleLimitMin);
return -1;
diff --git a/libhwcomposer/hwc_dump_layers.cpp b/libhwcomposer/hwc_dump_layers.cpp
index cf23b65..1b3d097 100644
--- a/libhwcomposer/hwc_dump_layers.cpp
+++ b/libhwcomposer/hwc_dump_layers.cpp
@@ -236,7 +236,7 @@
// Log Line 1
ALOGI("Display[%s] Layer[%d] SrcBuff[%dx%d] SrcCrop[%dl, %dt, %dr, %db] "
"DispFrame[%dl, %dt, %dr, %db] VisRegsScr%s", mDisplayName, layerIndex,
- (hnd)? hnd->width : -1, (hnd)? hnd->height : -1,
+ (hnd)? getWidth(hnd) : -1, (hnd)? getHeight(hnd) : -1,
sourceCrop.left, sourceCrop.top,
sourceCrop.right, sourceCrop.bottom,
displayFrame.left, displayFrame.top,
@@ -328,7 +328,7 @@
break;
}
if (SkBitmap::kNo_Config != tempSkBmpConfig) {
- tempSkBmp->setConfig(tempSkBmpConfig, hnd->width, hnd->height);
+ tempSkBmp->setConfig(tempSkBmpConfig, getWidth(hnd), getHeight(hnd));
tempSkBmp->setPixels((void*)hnd->base);
bResult = SkImageEncoder::EncodeFile(dumpFilename,
*tempSkBmp, SkImageEncoder::kPNG_Type, 100);
@@ -348,7 +348,7 @@
bool bResult = false;
sprintf(dumpFilename, "%s/sfdump%03d.layer%d.%dx%d.%s.%s.raw",
mDumpDirRaw, mDumpCntrRaw,
- layerIndex, hnd->width, hnd->height,
+ layerIndex, getWidth(hnd), getHeight(hnd),
pixFormatStr, mDisplayName);
FILE* fp = fopen(dumpFilename, "w+");
if (NULL != fp) {
@@ -400,6 +400,10 @@
case HAL_PIXEL_FORMAT_YCbCr_422_I:
strcpy(pixFormatStr, "YCbCr_422_I_YUY2");
break;
+ case HAL_PIXEL_FORMAT_YCrCb_422_I:
+ strlcpy(pixFormatStr, "YCrCb_422_I_YVYU",
+ sizeof("YCrCb_422_I_YVYU"));
+ break;
case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
strcpy(pixFormatStr, "NV12_ENCODEABLE");
break;
diff --git a/libhwcomposer/hwc_fbupdate.cpp b/libhwcomposer/hwc_fbupdate.cpp
index 53b3d18..12177b3 100644
--- a/libhwcomposer/hwc_fbupdate.cpp
+++ b/libhwcomposer/hwc_fbupdate.cpp
@@ -106,7 +106,7 @@
}
overlay::Overlay& ov = *(ctx->mOverlay);
private_handle_t *hnd = (private_handle_t *)layer->handle;
- ovutils::Whf info(hnd->width, hnd->height,
+ ovutils::Whf info(getWidth(hnd), getHeight(hnd),
ovutils::getMdpFormat(hnd->format), hnd->size);
//Request a pipe
@@ -244,7 +244,7 @@
}
overlay::Overlay& ov = *(ctx->mOverlay);
private_handle_t *hnd = (private_handle_t *)layer->handle;
- ovutils::Whf info(hnd->width, hnd->height,
+ ovutils::Whf info(getWidth(hnd), getHeight(hnd),
ovutils::getMdpFormat(hnd->format), hnd->size);
//Request left pipe
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index d388ebb..bb52728 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -41,6 +41,7 @@
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
+bool MDPComp::sEnablePartialFrameUpdate = false;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
float MDPComp::sMaxBw = 2.3f;
uint32_t MDPComp::sCompBytesClaimed = 0;
@@ -62,11 +63,8 @@
dumpsys_log(buf,"HWC Map for Dpy: %s \n",
(mDpy == 0) ? "\"PRIMARY\"" :
(mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
- dumpsys_log(buf,"PREV_FRAME: layerCount:%2d mdpCount:%2d \
- cacheCount:%2d \n", mCachedFrame.layerCount,
- mCachedFrame.mdpCount, mCachedFrame.cacheCount);
- dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d \
- fbCount:%2d \n", mCurrentFrame.layerCount,
+ dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
+ "fbCount:%2d \n", mCurrentFrame.layerCount,
mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
(mCurrentFrame.needsRedraw? "YES" : "NO"),
@@ -78,9 +76,10 @@
dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
index,
(mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
- mCurrentFrame.layerToMDP[index],
+ mCurrentFrame.layerToMDP[index],
(mCurrentFrame.isFBComposed[index] ?
- (mCurrentFrame.needsRedraw ? "GLES" : "CACHE") : "MDP"),
+ (mCurrentFrame.drop[index] ? "DROP" :
+ (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
(mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
dumpsys_log(buf,"\n");
@@ -109,12 +108,19 @@
sEnableMixedMode = false;
}
- sDebugLogs = false;
if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
if(atoi(property) != 0)
sDebugLogs = true;
}
+ if(property_get("persist.hwc.partialupdate.enable", property, NULL) > 0) {
+ if((atoi(property) != 0) && ctx->mMDP.panel == MIPI_CMD_PANEL &&
+ qdutils::MDPVersion::getInstance().is8x74v2())
+ sEnablePartialFrameUpdate = true;
+ }
+ ALOGE_IF(isDebug(), "%s: Partial Update applicable?: %d",__FUNCTION__,
+ sEnablePartialFrameUpdate);
+
sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
int val = atoi(property);
@@ -185,8 +191,11 @@
layer->compositionType = HWC_OVERLAY;
layer->hints |= HWC_HINT_CLEAR_FB;
} else {
- if(!mCurrentFrame.needsRedraw)
+        /* Drop the layer when it's already present in the FB OR when it lies
+         * outside the frame's ROI */
+ if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
layer->compositionType = HWC_OVERLAY;
+ }
}
}
}
@@ -233,10 +242,9 @@
void MDPComp::LayerCache::reset() {
memset(&hnd, 0, sizeof(hnd));
- mdpCount = 0;
- cacheCount = 0;
+ memset(&isFBComposed, true, sizeof(isFBComposed));
+ memset(&drop, false, sizeof(drop));
layerCount = 0;
- fbZ = -1;
}
void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) {
@@ -247,10 +255,32 @@
}
void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
- mdpCount = curFrame.mdpCount;
- cacheCount = curFrame.fbCount;
layerCount = curFrame.layerCount;
- fbZ = curFrame.fbZ;
+ memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
+ memcpy(&drop, &curFrame.drop, sizeof(drop));
+}
+
+bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame) {
+ if(layerCount != curFrame.layerCount)
+ return false;
+ for(int i = 0; i < curFrame.layerCount; i++) {
+ if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
+ (curFrame.drop[i] != drop[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if((not isYuvBuffer(hnd) and has90Transform(layer)) or
+ (not isValidDimension(ctx,layer))
+ //More conditions here, SKIP, sRGB+Blend etc
+ ) {
+ return false;
+ }
+ return true;
}
bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
@@ -368,6 +398,91 @@
return ret;
}
+bool MDPComp::validateAndApplyROI(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list, hwc_rect_t roi) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+
+ if(!isValidRect(roi))
+ return false;
+
+ for(int i = 0; i < numAppLayers; i++){
+ const hwc_layer_1_t* layer = &list->hwLayers[i];
+
+ hwc_rect_t dstRect = layer->displayFrame;
+ hwc_rect_t srcRect = layer->sourceCrop;
+ int transform = layer->transform;
+ trimLayer(ctx, mDpy, transform, srcRect, dstRect);
+
+ hwc_rect_t res = getIntersection(roi, dstRect);
+
+ int res_w = res.right - res.left;
+ int res_h = res.bottom - res.top;
+ int dst_w = dstRect.right - dstRect.left;
+ int dst_h = dstRect.bottom - dstRect.top;
+
+ if(!isValidRect(res)) {
+ mCurrentFrame.drop[i] = true;
+ mCurrentFrame.dropCount++;
+ }else {
+ /* Reset frame ROI when any layer which needs scaling also needs ROI
+ * cropping */
+ if((res_w != dst_w || res_h != dst_h) &&
+ needsScaling (ctx, layer, mDpy)) {
+ ALOGE("%s: Resetting ROI due to scaling", __FUNCTION__);
+ memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
+ mCurrentFrame.dropCount = 0;
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void MDPComp::generateROI(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+
+ if(!sEnablePartialFrameUpdate) {
+ return;
+ }
+
+ if(mDpy || isDisplaySplit(ctx, mDpy)){
+        ALOGE_IF(isDebug(), "%s: ROI not supported for "
+            "(1) external / virtual displays or (2) dual DSI displays",
+ __FUNCTION__);
+ return;
+ }
+
+ if(list->flags & HWC_GEOMETRY_CHANGED)
+ return;
+
+ struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
+ for(int index = 0; index < numAppLayers; index++ ) {
+ if ((mCachedFrame.hnd[index] != list->hwLayers[index].handle) ||
+ isYuvBuffer((private_handle_t *)list->hwLayers[index].handle)) {
+ hwc_rect_t dstRect = list->hwLayers[index].displayFrame;
+ hwc_rect_t srcRect = list->hwLayers[index].sourceCrop;
+ int transform = list->hwLayers[index].transform;
+
+ /* Intersect against display boundaries */
+ trimLayer(ctx, mDpy, transform, srcRect, dstRect);
+ roi = getUnion(roi, dstRect);
+ }
+ }
+
+ if(!validateAndApplyROI(ctx, list, roi)){
+ roi = (struct hwc_rect) {0, 0,
+ (int)ctx->dpyAttr[mDpy].xres, (int)ctx->dpyAttr[mDpy].yres};
+ }
+
+ ctx->listStats[mDpy].roi.x = roi.left;
+ ctx->listStats[mDpy].roi.y = roi.top;
+ ctx->listStats[mDpy].roi.w = roi.right - roi.left;
+ ctx->listStats[mDpy].roi.h = roi.bottom - roi.top;
+
+ ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
+ roi.left, roi.top, roi.right, roi.bottom);
+}
+
/* Checks for conditions where all the layers marked for MDP comp cannot be
* bypassed. On such conditions we try to bypass atleast YUV layers */
bool MDPComp::isFullFrameDoable(hwc_context_t *ctx,
@@ -375,17 +490,11 @@
const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
- if(sIdleFallBack) {
+ if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
return false;
}
- if(mDpy > HWC_DISPLAY_PRIMARY){
- ALOGD_IF(isDebug(), "%s: Cannot support External display(s)",
- __FUNCTION__);
- return false;
- }
-
if(isSkipPresent(ctx, mDpy)) {
ALOGD_IF(isDebug(),"%s: SKIP present: %d",
__FUNCTION__,
@@ -399,27 +508,17 @@
return false;
}
- //MDP composition is not efficient if layer needs rotator.
for(int i = 0; i < numAppLayers; ++i) {
- // As MDP h/w supports flip operation, use MDP comp only for
- // 180 transforms. Fail for any transform involving 90 (90, 270).
hwc_layer_1_t* layer = &list->hwLayers[i];
private_handle_t *hnd = (private_handle_t *)layer->handle;
- if(layer->transform & HWC_TRANSFORM_ROT_90) {
- if(!isYuvBuffer(hnd) ) {
- ALOGD_IF(isDebug(), "%s: orientation involved",__FUNCTION__);
- return false;
- }else if(!canUseRotator(ctx, mDpy)) {
- ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
+ if(isYuvBuffer(hnd) && has90Transform(layer)) {
+ if(!canUseRotator(ctx, mDpy)) {
+ ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
+ __FUNCTION__, mDpy);
return false;
}
}
- if(!isValidDimension(ctx,layer)) {
- ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
- __FUNCTION__);
- return false;
- }
//For 8x26 with panel width>1k, if RGB layer needs HFLIP fail mdp comp
// may not need it if Gfx pre-rotation can handle all flips & rotations
@@ -445,14 +544,29 @@
}
bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
- //Setup mCurrentFrame
- mCurrentFrame.mdpCount = mCurrentFrame.layerCount;
+ //Will benefit presentation / secondary-only layer.
+ if((mDpy > HWC_DISPLAY_PRIMARY) &&
+ (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
+ ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
+ return false;
+ }
+
+ const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+ for(int i = 0; i < numAppLayers; i++) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
+ return false;
+ }
+ }
mCurrentFrame.fbCount = 0;
mCurrentFrame.fbZ = -1;
- memset(&mCurrentFrame.isFBComposed, 0, sizeof(mCurrentFrame.isFBComposed));
+ memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
+ sizeof(mCurrentFrame.isFBComposed));
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
+ mCurrentFrame.dropCount;
- int mdpCount = mCurrentFrame.mdpCount;
- if(mdpCount > sMaxPipesPerMixer) {
+ if(mCurrentFrame.mdpCount > sMaxPipesPerMixer) {
ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
return false;
}
@@ -472,20 +586,57 @@
bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
- int numAppLayers = ctx->listStats[mDpy].numAppLayers;
-
if(!sEnableMixedMode) {
//Mixed mode is disabled. No need to even try caching.
return false;
}
- //Setup mCurrentFrame
+ bool ret = false;
+ if(isLoadBasedCompDoable(ctx, list)) {
+ ret = loadBasedComp(ctx, list);
+ }
+
+ if(!ret) {
+ ret = cacheBasedComp(ctx, list);
+ }
+
+ return ret;
+}
+
+bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
mCurrentFrame.reset(numAppLayers);
updateLayerCache(ctx, list);
- updateYUV(ctx, list);
- batchLayers(); //sets up fbZ also
+
+ //If an MDP marked layer is unsupported cannot do partial MDP Comp
+ for(int i = 0; i < numAppLayers; i++) {
+ if(!mCurrentFrame.isFBComposed[i]) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
+ __FUNCTION__);
+ return false;
+ }
+ }
+ }
+
+ updateYUV(ctx, list, false /*secure only*/);
+ bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
+ if(!ret) {
+ ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
+ return false;
+ }
int mdpCount = mCurrentFrame.mdpCount;
+
+ //Will benefit cases where a video has non-updating background.
+ if((mDpy > HWC_DISPLAY_PRIMARY) and
+ (mdpCount > MAX_SEC_LAYERS)) {
+ ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
+ return false;
+ }
+
if(mdpCount > (sMaxPipesPerMixer - 1)) { // -1 since FB is used
ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
return false;
@@ -504,13 +655,85 @@
return true;
}
-bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
- hwc_display_contents_1_t* list){
+bool MDPComp::loadBasedComp(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
mCurrentFrame.reset(numAppLayers);
- updateYUV(ctx, list);
+
+ //TODO BatchSize could be optimized further based on available pipes, split
+ //displays etc.
+ const int batchSize = numAppLayers - (sMaxPipesPerMixer - 1);
+ if(batchSize <= 0) {
+ ALOGD_IF(isDebug(), "%s: Not attempting", __FUNCTION__);
+ return false;
+ }
+
+ int minBatchStart = -1;
+ size_t minBatchPixelCount = SIZE_MAX;
+
+ for(int i = 0; i <= numAppLayers - batchSize; i++) {
+ uint32_t batchPixelCount = 0;
+ for(int j = i; j < i + batchSize; j++) {
+ hwc_layer_1_t* layer = &list->hwLayers[j];
+ hwc_rect_t crop = layer->sourceCrop;
+ batchPixelCount += (crop.right - crop.left) *
+ (crop.bottom - crop.top);
+ }
+
+ if(batchPixelCount < minBatchPixelCount) {
+ minBatchPixelCount = batchPixelCount;
+ minBatchStart = i;
+ }
+ }
+
+ if(minBatchStart < 0) {
+ ALOGD_IF(isDebug(), "%s: No batch found batchSize %d numAppLayers %d",
+ __FUNCTION__, batchSize, numAppLayers);
+ return false;
+ }
+
+ for(int i = 0; i < numAppLayers; i++) {
+ if(i < minBatchStart || i >= minBatchStart + batchSize) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ ALOGD_IF(isDebug(), "%s: MDP unsupported layer found at %d",
+ __FUNCTION__, i);
+ return false;
+ }
+ mCurrentFrame.isFBComposed[i] = false;
+ }
+ }
+
+ mCurrentFrame.fbZ = minBatchStart;
+ mCurrentFrame.fbCount = batchSize;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - batchSize;
+
+ if(!arePipesAvailable(ctx, list)) {
+ return false;
+ }
+
+ ALOGD_IF(isDebug(), "%s: fbZ %d batchSize %d",
+ __FUNCTION__, mCurrentFrame.fbZ, batchSize);
+ return true;
+}
+
+bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ if(mDpy or isSecurePresent(ctx, mDpy) or
+ not (list->flags & HWC_GEOMETRY_CHANGED)) {
+ return false;
+ }
+ return true;
+}
+
+bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list, bool secureOnly) {
+ int numAppLayers = ctx->listStats[mDpy].numAppLayers;
+
+ mCurrentFrame.reset(numAppLayers);
+ updateYUV(ctx, list, secureOnly);
int mdpCount = mCurrentFrame.mdpCount;
- int fbNeeded = int(mCurrentFrame.fbCount != 0);
+ int fbNeeded = (mCurrentFrame.fbCount != 0);
if(!isYuvPresent(ctx, mDpy)) {
return false;
@@ -572,65 +795,177 @@
return true;
}
-void MDPComp::batchLayers() {
- /* Idea is to keep as many contiguous non-updating(cached) layers in FB and
- * send rest of them through MDP. NEVER mark an updating layer for caching.
- * But cached ones can be marked for MDP*/
+/* Starts at fromIndex and checks each layer in the batch to find
+ * whether it overlaps with any updating layer above it in z-order,
+ * till the end of the batch. Returns false if it finds any intersection */
+bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
+ int fromIndex, int toIndex) {
+ for(int i = fromIndex; i < toIndex; i++) {
+ if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
+ if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/* Checks if the given layer at targetLayerIndex has any
+ * intersection with the updating layers in between
+ * fromIndex and toIndex. Returns true if it finds an intersection */
+bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
+ int fromIndex, int toIndex, int targetLayerIndex) {
+ for(int i = fromIndex; i <= toIndex; i++) {
+ if(!mCurrentFrame.isFBComposed[i]) {
+ if(areLayersIntersecting(&list->hwLayers[i],
+ &list->hwLayers[targetLayerIndex])) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+int MDPComp::getBatch(hwc_display_contents_1_t* list,
+ int& maxBatchStart, int& maxBatchEnd,
+ int& maxBatchCount) {
+ int i = 0;
+ int updatingLayersAbove = 0;//Updating layer count in middle of batch
+ int fbZOrder =-1;
+ while (i < mCurrentFrame.layerCount) {
+ int batchCount = 0;
+ int batchStart = i;
+ int batchEnd = i;
+ int fbZ = batchStart;
+ int firstZReverseIndex = -1;
+ while(i < mCurrentFrame.layerCount) {
+ if(!mCurrentFrame.isFBComposed[i]) {
+ if(!batchCount) {
+ i++;
+ break;
+ }
+ updatingLayersAbove++;
+ i++;
+ continue;
+ } else {
+ if(mCurrentFrame.drop[i]) {
+ i++;
+ continue;
+ } else if(updatingLayersAbove <= 0) {
+ batchCount++;
+ batchEnd = i;
+ i++;
+ continue;
+ } else { //Layer is FBComposed, not a drop & updatingLayer > 0
+
+                    // We have a valid updating layer already. If layer-i does
+                    // not overlap with any updating layer in between
+                    // batch-start and i, then we can add layer-i to the batch.
+ if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
+ batchCount++;
+ batchEnd = i;
+ i++;
+ continue;
+ } else if(canPushBatchToTop(list, batchStart, i)) {
+                        //If all the non-updating layers within this batch
+                        //have no intersection with the updating layers
+                        //above in z-order, then we can safely move the batch to
+                        //a higher z-order. Increment fbZ as it moves up.
+ if( firstZReverseIndex < 0) {
+ firstZReverseIndex = i;
+ }
+ batchCount++;
+ batchEnd = i;
+ fbZ += updatingLayersAbove;
+ i++;
+ updatingLayersAbove = 0;
+ continue;
+ } else {
+                        //Both checks failed. Restart the loop from here.
+ if(firstZReverseIndex >= 0) {
+ i = firstZReverseIndex;
+ }
+ break;
+ }
+ }
+ }
+ }
+ if(batchCount > maxBatchCount) {
+ maxBatchCount = batchCount;
+ maxBatchStart = batchStart;
+ maxBatchEnd = batchEnd;
+ fbZOrder = fbZ;
+ }
+ }
+ return fbZOrder;
+}
+
+bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
+ hwc_display_contents_1_t* list) {
+ /* Idea is to keep as many non-updating(cached) layers in FB and
+     * send the rest of them through MDP. This is done in 2 steps.
+ * 1. Find the maximum contiguous batch of non-updating layers.
+ * 2. See if we can improve this batch size for caching by adding
+ * opaque layers around the batch, if they don't have
+     *      any overlap with the updating layers in between.
+ * NEVER mark an updating layer for caching.
+ * But cached ones can be marked for MDP */
int maxBatchStart = -1;
+ int maxBatchEnd = -1;
int maxBatchCount = 0;
+ int fbZ = -1;
/* All or Nothing is cached. No batching needed */
if(!mCurrentFrame.fbCount) {
mCurrentFrame.fbZ = -1;
- return;
+ return true;
}
if(!mCurrentFrame.mdpCount) {
mCurrentFrame.fbZ = 0;
- return;
+ return true;
}
- /* Search for max number of contiguous (cached) layers */
- int i = 0;
- while (i < mCurrentFrame.layerCount) {
- int count = 0;
- while(mCurrentFrame.isFBComposed[i] && i < mCurrentFrame.layerCount) {
- count++; i++;
- }
- if(count > maxBatchCount) {
- maxBatchCount = count;
- maxBatchStart = i - count;
- mCurrentFrame.fbZ = maxBatchStart;
- }
- if(i < mCurrentFrame.layerCount) i++;
- }
+ fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);
- /* reset rest of the layers for MDP comp */
+    /* reset the rest of the layers lying inside the ROI for MDP comp */
for(int i = 0; i < mCurrentFrame.layerCount; i++) {
- if(i != maxBatchStart){
- mCurrentFrame.isFBComposed[i] = false;
- } else {
- i += maxBatchCount;
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ if((i < maxBatchStart || i > maxBatchEnd) &&
+ mCurrentFrame.isFBComposed[i]){
+ if(!mCurrentFrame.drop[i]){
+                //If an unsupported layer would have to be
+                //pulled out for MDP comp, we should fail
+ if(not isSupportedForMDPComp(ctx, layer)) {
+ return false;
+ }
+ mCurrentFrame.isFBComposed[i] = false;
+ }
}
}
+ // update the frame data
+ mCurrentFrame.fbZ = fbZ;
mCurrentFrame.fbCount = maxBatchCount;
mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
- mCurrentFrame.fbCount;
+ mCurrentFrame.fbCount - mCurrentFrame.dropCount;
ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
- mCurrentFrame.fbCount);
+ mCurrentFrame.fbCount);
+
+ return true;
}
void MDPComp::updateLayerCache(hwc_context_t* ctx,
- hwc_display_contents_1_t* list) {
-
+ hwc_display_contents_1_t* list) {
int numAppLayers = ctx->listStats[mDpy].numAppLayers;
- int numCacheableLayers = 0;
+ int fbCount = 0;
for(int i = 0; i < numAppLayers; i++) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
- numCacheableLayers++;
+ if(!mCurrentFrame.drop[i])
+ fbCount++;
mCurrentFrame.isFBComposed[i] = true;
} else {
mCurrentFrame.isFBComposed[i] = false;
@@ -638,14 +973,17 @@
}
}
- mCurrentFrame.fbCount = numCacheableLayers;
- mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
- mCurrentFrame.fbCount;
- ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, numCacheableLayers);
+ mCurrentFrame.fbCount = fbCount;
+ mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
+ - mCurrentFrame.dropCount;
+
+ ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d"
+ ,__FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
+ mCurrentFrame.dropCount);
}
-void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list) {
-
+void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
+ bool secureOnly) {
int nYuvCount = ctx->listStats[mDpy].yuvCount;
if(!nYuvCount && mDpy) {
//Reset "No animation on external display" related parameters.
@@ -667,15 +1005,18 @@
}
} else {
if(mCurrentFrame.isFBComposed[nYuvIndex]) {
- mCurrentFrame.isFBComposed[nYuvIndex] = false;
- mCurrentFrame.fbCount--;
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(!secureOnly || isSecureBuffer(hnd)) {
+ mCurrentFrame.isFBComposed[nYuvIndex] = false;
+ mCurrentFrame.fbCount--;
+ }
}
}
}
mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
- mCurrentFrame.fbCount;
- ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
+ mCurrentFrame.fbCount - mCurrentFrame.dropCount;
+ ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
mCurrentFrame.fbCount);
}
@@ -685,24 +1026,25 @@
return false;
}
- bool fbBatch = false;
for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
index++) {
if(!mCurrentFrame.isFBComposed[index]) {
int mdpIndex = mCurrentFrame.layerToMDP[index];
hwc_layer_1_t* layer = &list->hwLayers[index];
+ //Leave fbZ for framebuffer. CACHE/GLES layers go here.
+ if(mdpNextZOrder == mCurrentFrame.fbZ) {
+ mdpNextZOrder++;
+ }
MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
cur_pipe->zOrder = mdpNextZOrder++;
+
if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
layer %d",__FUNCTION__, index);
return false;
}
- } else if(fbBatch == false) {
- mdpNextZOrder++;
- fbBatch = true;
}
}
@@ -748,9 +1090,12 @@
private_handle_t *hnd = (private_handle_t *)layer->handle;
if (hnd) {
hwc_rect_t crop = layer->sourceCrop;
+ hwc_rect_t dst = layer->displayFrame;
+ trimLayer(ctx, mDpy, layer->transform, crop, dst);
float bpp = ((float)hnd->size) / (hnd->width * hnd->height);
- size += bpp * ((crop.right - crop.left) *
- (crop.bottom - crop.top));
+ size += bpp * (crop.right - crop.left) *
+ (crop.bottom - crop.top) *
+ ctx->dpyAttr[mDpy].yres / (dst.bottom - dst.top);
}
}
}
@@ -785,15 +1130,17 @@
//reset old data
mCurrentFrame.reset(numLayers);
+ memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
+ mCurrentFrame.dropCount = 0;
//number of app layers exceeds MAX_NUM_APP_LAYERS fall back to GPU
//do not cache the information for next draw cycle.
if(numLayers > MAX_NUM_APP_LAYERS) {
mCachedFrame.updateCounts(mCurrentFrame);
- ALOGD_IF(isDebug(), "%s: Number of App layers exceeded the limit ",
- __FUNCTION__);
+ ALOGE("%s: Number of App layers exceeded the limit ",
+ __FUNCTION__);
ret = -1;
- goto exit;
+ return ret;
}
//Hard conditions, if not met, cannot do MDP comp
@@ -805,6 +1152,8 @@
goto exit;
}
+ generateROI(ctx, list);
+
//Check whether layers marked for MDP Composition is actually doable.
if(isFullFrameDoable(ctx, list)) {
mCurrentFrame.map();
@@ -828,18 +1177,14 @@
} else { //Success
//Any change in composition types needs an FB refresh
mCurrentFrame.needsRedraw = false;
- if(mCurrentFrame.fbCount &&
- ((mCurrentFrame.mdpCount != mCachedFrame.mdpCount) ||
- (mCurrentFrame.fbCount != mCachedFrame.cacheCount) ||
- (mCurrentFrame.fbZ != mCachedFrame.fbZ) ||
- (!mCurrentFrame.mdpCount) ||
+ if(!mCachedFrame.isSameFrame(mCurrentFrame) ||
(list->flags & HWC_GEOMETRY_CHANGED) ||
- isSkipPresent(ctx, mDpy) ||
- (mDpy > HWC_DISPLAY_PRIMARY))) {
+ isSkipPresent(ctx, mDpy)) {
mCurrentFrame.needsRedraw = true;
}
}
- } else if(isOnlyVideoDoable(ctx, list)) {
+ } else if(isOnlyVideoDoable(ctx, list, false /*secure only*/) ||
+ isOnlyVideoDoable(ctx, list, true /*secure only*/)) {
//All layers marked for MDP comp cannot be bypassed.
//Try to compose atleast YUV layers through MDP comp and let
//all the RGB layers compose in FB
@@ -874,6 +1219,7 @@
//UpdateLayerFlags
setMDPCompLayerFlags(ctx, list);
+ mCachedFrame.cacheAll(list);
mCachedFrame.updateCounts(mCurrentFrame);
// unlock it before calling dump function to avoid deadlock
@@ -926,6 +1272,36 @@
return false;
}
+ if(not areVGPipesAvailable(ctx, list)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool MDPCompNonSplit::areVGPipesAvailable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ overlay::Overlay& ov = *ctx->mOverlay;
+ int pipesNeeded = 0;
+ for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
+ if(!mCurrentFrame.isFBComposed[i]) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ hwc_rect_t dst = layer->displayFrame;
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(isYuvBuffer(hnd)) {
+ pipesNeeded++;
+ }
+ }
+ }
+
+ int availableVGPipes = ov.availablePipes(mDpy, ovutils::OV_MDP_PIPE_VG);
+ if(pipesNeeded > availableVGPipes) {
+        ALOGD_IF(isDebug(), "%s: Insufficient VG pipes for video layers "
+ "dpy %d needed %d, avail %d",
+ __FUNCTION__, mDpy, pipesNeeded, availableVGPipes);
+ return false;
+ }
+
return true;
}
@@ -1094,6 +1470,42 @@
return false;
}
+ if(not areVGPipesAvailable(ctx, list)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool MDPCompSplit::areVGPipesAvailable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) {
+ overlay::Overlay& ov = *ctx->mOverlay;
+ int pipesNeeded = 0;
+ const int lSplit = getLeftSplit(ctx, mDpy);
+ for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
+ if(!mCurrentFrame.isFBComposed[i]) {
+ hwc_layer_1_t* layer = &list->hwLayers[i];
+ hwc_rect_t dst = layer->displayFrame;
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(isYuvBuffer(hnd)) {
+ if(dst.left < lSplit) {
+ pipesNeeded++;
+ }
+ if(dst.right > lSplit) {
+ pipesNeeded++;
+ }
+ }
+ }
+ }
+
+ int availableVGPipes = ov.availablePipes(mDpy, ovutils::OV_MDP_PIPE_VG);
+ if(pipesNeeded > availableVGPipes) {
+        ALOGD_IF(isDebug(), "%s: Insufficient VG pipes for video layers "
+ "dpy %d needed %d, avail %d",
+ __FUNCTION__, mDpy, pipesNeeded, availableVGPipes);
+ return false;
+ }
+
return true;
}
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index da4b330..1d5d715 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -55,6 +55,8 @@
static void reset() { sCompBytesClaimed = 0; };
protected:
+ enum { MAX_SEC_LAYERS = 1 }; //TODO add property support
+
enum ePipeType {
MDPCOMP_OV_RGB = ovutils::OV_MDP_PIPE_RGB,
MDPCOMP_OV_VG = ovutils::OV_MDP_PIPE_VG,
@@ -88,6 +90,10 @@
/* layer composing on FB? */
int fbCount;
bool isFBComposed[MAX_NUM_APP_LAYERS];
+    /* layers lying outside the ROI. Will
+     * be dropped from the composition */
+ int dropCount;
+ bool drop[MAX_NUM_APP_LAYERS];
bool needsRedraw;
int fbZ;
@@ -102,10 +108,9 @@
/* cached data */
struct LayerCache {
int layerCount;
- int mdpCount;
- int cacheCount;
- int fbZ;
buffer_handle_t hnd[MAX_NUM_APP_LAYERS];
+ bool isFBComposed[MAX_NUM_APP_LAYERS];
+ bool drop[MAX_NUM_APP_LAYERS];
/* c'tor */
LayerCache();
@@ -113,6 +118,7 @@
void reset();
void cacheAll(hwc_display_contents_1_t* list);
void updateCounts(const FrameInfo&);
+ bool isSameFrame(const FrameInfo& curFrame);
};
/* allocates pipe from pipe book */
@@ -138,8 +144,16 @@
bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
/* check if we can use layer cache to do at least partial MDP comp */
bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ /* Partial MDP comp that uses caching to save power as primary goal */
+ bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+    /* Partial MDP comp that uses pixel count to optimize for performance */
+ bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+    /* Checks if it's worth doing load based partial comp */
+ bool isLoadBasedCompDoable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
/* checks for conditions where only video can be bypassed */
- bool isOnlyVideoDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ bool isOnlyVideoDoable(hwc_context_t *ctx, hwc_display_contents_1_t* list,
+ bool secureOnly);
/* checks for conditions where YUV layers cannot be bypassed */
bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
/* calcs bytes read by MDP for a given frame */
@@ -147,6 +161,10 @@
hwc_display_contents_1_t* list);
/* checks if the required bandwidth exceeds a certain max */
bool bandwidthCheck(hwc_context_t *ctx, const uint32_t& size);
+ /* generates ROI based on the modified area of the frame */
+ void generateROI(hwc_context_t *ctx, hwc_display_contents_1_t* list);
+ bool validateAndApplyROI(hwc_context_t *ctx, hwc_display_contents_1_t* list,
+ hwc_rect_t roi);
/* Is debug enabled */
static bool isDebug() { return sDebugLogs ? true : false; };
@@ -157,16 +175,29 @@
/* tracks non updating layers*/
void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
/* optimize layers for mdp comp*/
- void batchLayers();
- /* updates cache map with YUV info */
- void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list);
+ bool markLayersForCaching(hwc_context_t* ctx,
+ hwc_display_contents_1_t* list);
+ int getBatch(hwc_display_contents_1_t* list,
+ int& maxBatchStart, int& maxBatchEnd,
+ int& maxBatchCount);
+ bool canPushBatchToTop(const hwc_display_contents_1_t* list,
+ int fromIndex, int toIndex);
+ bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
+ int fromIndex, int toIndex, int targetLayerIndex);
+
+ /* updates cache map with YUV info */
+ void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
+ bool secureOnly);
bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);
bool programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list);
void reset(const int& numAppLayers, hwc_display_contents_1_t* list);
+ bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
int mDpy;
static bool sEnabled;
static bool sEnableMixedMode;
+ /* Enables Partial frame composition */
+ static bool sEnablePartialFrameUpdate;
static bool sDebugLogs;
static bool sIdleFallBack;
static int sMaxPipesPerMixer;
@@ -203,6 +234,10 @@
/* Checks for pipes needed versus pipes available */
virtual bool arePipesAvailable(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
+
+ /* Checks for video pipes needed versus pipes available */
+ virtual bool areVGPipesAvailable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
};
class MDPCompSplit : public MDPComp {
@@ -232,6 +267,10 @@
virtual bool arePipesAvailable(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
+ /* Checks for video pipes needed versus pipes available */
+ virtual bool areVGPipesAvailable(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
+
int pipesNeeded(hwc_context_t *ctx, hwc_display_contents_1_t* list,
int mixer);
};
diff --git a/libhwcomposer/hwc_uevents.cpp b/libhwcomposer/hwc_uevents.cpp
index 8c3d00d..8906216 100644
--- a/libhwcomposer/hwc_uevents.cpp
+++ b/libhwcomposer/hwc_uevents.cpp
@@ -141,6 +141,7 @@
"event", __FUNCTION__);
ctx->proc->hotplug(ctx->proc, HWC_DISPLAY_EXTERNAL,
EXTERNAL_OFFLINE);
+ ctx->mVirtualonExtActive = false;
}
break;
}
@@ -309,7 +310,10 @@
char thread_name[64] = HWC_UEVENT_THREAD_NAME;
prctl(PR_SET_NAME, (unsigned long) &thread_name, 0, 0, 0);
setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY);
- uevent_init();
+ if(!uevent_init()) {
+ ALOGE("%s: failed to init uevent ",__FUNCTION__);
+ return NULL;
+ }
while(1) {
len = uevent_next_event(udata, sizeof(udata) - 2);
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index 9837bd1..e6c235e 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -312,6 +312,12 @@
float fbWidth = ctx->dpyAttr[dpy].xres;
float fbHeight = ctx->dpyAttr[dpy].yres;
+ if(ctx->dpyAttr[dpy].mDownScaleMode) {
+ // if downscale Mode is enabled for external, need to query
+ // the actual width and height, as that is the physical w & h
+ ctx->mExtDisplay->getAttributes((int&)fbWidth, (int&)fbHeight);
+ }
+
// Since external is rotated 90, need to swap width/height
int extOrient = getExtOrientation(ctx);
@@ -704,10 +710,16 @@
ctx->listStats[dpy].skipCount = 0;
ctx->listStats[dpy].needsAlphaScale = false;
ctx->listStats[dpy].preMultipliedAlpha = false;
+ ctx->listStats[dpy].isSecurePresent = false;
ctx->listStats[dpy].yuvCount = 0;
char property[PROPERTY_VALUE_MAX];
ctx->listStats[dpy].extOnlyLayerIndex = -1;
ctx->listStats[dpy].isDisplayAnimating = false;
+ ctx->listStats[dpy].roi = ovutils::Dim(0, 0,
+ (int)ctx->dpyAttr[dpy].xres, (int)ctx->dpyAttr[dpy].yres);
+ ctx->listStats[dpy].secureUI = false;
+
+ optimizeLayerRects(ctx, list, dpy);
for (size_t i = 0; i < (size_t)ctx->listStats[dpy].numAppLayers; i++) {
hwc_layer_1_t const* layer = &list->hwLayers[i];
@@ -717,6 +729,9 @@
if (layer->flags & HWC_SCREENSHOT_ANIMATOR_LAYER) {
ctx->listStats[dpy].isDisplayAnimating = true;
}
+ if(isSecureDisplayBuffer(hnd)) {
+ ctx->listStats[dpy].secureUI = true;
+ }
#endif
// continue if number of app layers exceeds MAX_NUM_APP_LAYERS
if(ctx->listStats[dpy].numAppLayers > MAX_NUM_APP_LAYERS)
@@ -725,6 +740,10 @@
//reset yuv indices
ctx->listStats[dpy].yuvIndices[i] = -1;
+ if (isSecureBuffer(hnd)) {
+ ctx->listStats[dpy].isSecurePresent = true;
+ }
+
if (isSkipLayer(&list->hwLayers[i])) {
ctx->listStats[dpy].skipCount++;
}
@@ -915,6 +934,122 @@
crop_b -= crop_h * bottomCutRatio;
}
+bool areLayersIntersecting(const hwc_layer_1_t* layer1,
+ const hwc_layer_1_t* layer2) {
+ hwc_rect_t irect = getIntersection(layer1->displayFrame,
+ layer2->displayFrame);
+ return isValidRect(irect);
+}
+
+bool isValidRect(const hwc_rect& rect)
+{
+ return ((rect.bottom > rect.top) && (rect.right > rect.left)) ;
+}
+
+/* computes the intersection of two rects */
+hwc_rect_t getIntersection(const hwc_rect_t& rect1, const hwc_rect_t& rect2)
+{
+ hwc_rect_t res;
+
+ if(!isValidRect(rect1) || !isValidRect(rect2)){
+ return (hwc_rect_t){0, 0, 0, 0};
+ }
+
+
+ res.left = max(rect1.left, rect2.left);
+ res.top = max(rect1.top, rect2.top);
+ res.right = min(rect1.right, rect2.right);
+ res.bottom = min(rect1.bottom, rect2.bottom);
+
+ if(!isValidRect(res))
+ return (hwc_rect_t){0, 0, 0, 0};
+
+ return res;
+}
+
+/* computes the union of two rects */
+hwc_rect_t getUnion(const hwc_rect &rect1, const hwc_rect &rect2)
+{
+ hwc_rect_t res;
+
+ if(!isValidRect(rect1)){
+ return rect2;
+ }
+
+ if(!isValidRect(rect2)){
+ return rect1;
+ }
+
+ res.left = min(rect1.left, rect2.left);
+ res.top = min(rect1.top, rect2.top);
+ res.right = max(rect1.right, rect2.right);
+ res.bottom = max(rect1.bottom, rect2.bottom);
+
+ return res;
+}
+
+/* deducts the given rect from the layer's display frame and source crop.
+   It also avoids creating holes. */
+void deductRect(const hwc_layer_1_t* layer, hwc_rect_t& irect) {
+ hwc_rect_t& disprect = (hwc_rect_t&)layer->displayFrame;
+ hwc_rect_t& srcrect = (hwc_rect_t&)layer->sourceCrop;
+ int irect_w = irect.right - irect.left;
+ int irect_h = irect.bottom - irect.top;
+
+ if((disprect.left == irect.left) && (disprect.right == irect.right)) {
+ if((disprect.top == irect.top) && (irect.bottom <= disprect.bottom)) {
+ disprect.top = irect.bottom;
+ srcrect.top += irect_h;
+ }
+ else if((disprect.bottom == irect.bottom)
+ && (irect.top >= disprect.top)) {
+ disprect.bottom = irect.top;
+ srcrect.bottom -= irect_h;
+ }
+ }
+ else if((disprect.top == irect.top) && (disprect.bottom == irect.bottom)) {
+ if((disprect.left == irect.left) && (irect.right <= disprect.right)) {
+ disprect.left = irect.right;
+ srcrect.left += irect_w;
+ }
+ else if((disprect.right == irect.right)
+ && (irect.left >= disprect.left)) {
+ disprect.right = irect.left;
+ srcrect.right -= irect_w;
+ }
+ }
+}
+
+void optimizeLayerRects(hwc_context_t *ctx,
+ const hwc_display_contents_1_t *list, const int& dpy) {
+ int i=list->numHwLayers-2;
+ hwc_rect_t irect;
+ while(i > 0) {
+
+        //See if no blending is required.
+        //If the layer is opaque, see if we can subtract this region from the layers below.
+ if(list->hwLayers[i].blending == HWC_BLENDING_NONE) {
+ int j= i-1;
+ hwc_rect_t& topframe =
+ (hwc_rect_t&)list->hwLayers[i].displayFrame;
+ while(j >= 0) {
+ if(!needsScaling(ctx, &list->hwLayers[j], dpy)) {
+ hwc_rect_t& bottomframe =
+ (hwc_rect_t&)list->hwLayers[j].displayFrame;
+
+ hwc_rect_t irect = getIntersection(bottomframe, topframe);
+ if(isValidRect(irect)) {
+ //if intersection is valid rect, deduct it
+ deductRect(&list->hwLayers[j], irect);
+ }
+ }
+ j--;
+ }
+ }
+ i--;
+ }
+}
+
void getNonWormholeRegion(hwc_display_contents_1_t* list,
hwc_rect_t& nwr)
{
@@ -928,18 +1063,11 @@
for (uint32_t i = 1; i < last; i++) {
hwc_rect_t displayFrame = list->hwLayers[i].displayFrame;
- nwr.left = min(nwr.left, displayFrame.left);
- nwr.top = min(nwr.top, displayFrame.top);
- nwr.right = max(nwr.right, displayFrame.right);
- nwr.bottom = max(nwr.bottom, displayFrame.bottom);
+ nwr = getUnion(nwr, displayFrame);
}
//Intersect with the framebuffer
- nwr.left = max(nwr.left, fbDisplayFrame.left);
- nwr.top = max(nwr.top, fbDisplayFrame.top);
- nwr.right = min(nwr.right, fbDisplayFrame.right);
- nwr.bottom = min(nwr.bottom, fbDisplayFrame.bottom);
-
+ nwr = getIntersection(nwr, fbDisplayFrame);
}
bool isExternalActive(hwc_context_t* ctx) {
@@ -1140,6 +1268,13 @@
}
}
+ if(isSecureDisplayBuffer(hnd)) {
+ // Secure display needs both SECURE_OVERLAY and SECURE_DISPLAY_OV
+ ovutils::setMdpFlags(mdpFlags,
+ ovutils::OV_MDP_SECURE_OVERLAY_SESSION);
+ ovutils::setMdpFlags(mdpFlags,
+ ovutils::OV_MDP_SECURE_DISPLAY_OVERLAY_SESSION);
+ }
//No 90 component and no rot-downscale then flips done by MDP
//If we use rot then it might as well do flips
if(!(transform & HWC_TRANSFORM_ROT_90) && !rotDownscale) {
@@ -1265,9 +1400,17 @@
eTransform orient = static_cast<eTransform>(transform);
int downscale = 0;
int rotFlags = ovutils::ROT_FLAGS_NONE;
- Whf whf(hnd->width, hnd->height,
+ Whf whf(getWidth(hnd), getHeight(hnd),
getMdpFormat(hnd->format), hnd->size);
+ // Handle R/B swap
+ if (layer->flags & HWC_FORMAT_RB_SWAP) {
+ if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
+ whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
+ else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
+ whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
+ }
+
if(dpy && isYuvBuffer(hnd)) {
if(!ctx->listStats[dpy].isDisplayAnimating) {
ctx->mPrevCropVideo = crop;
@@ -1388,9 +1531,17 @@
const int downscale = 0;
int rotFlags = ROT_FLAGS_NONE;
- Whf whf(hnd->width, hnd->height,
+ Whf whf(getWidth(hnd), getHeight(hnd),
getMdpFormat(hnd->format), hnd->size);
+ // Handle R/B swap
+ if (layer->flags & HWC_FORMAT_RB_SWAP) {
+ if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
+ whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
+ else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
+ whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
+ }
+
if(dpy && isYuvBuffer(hnd)) {
if(!ctx->listStats[dpy].isDisplayAnimating) {
ctx->mPrevCropVideo = crop;
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index cb6d091..d281fb0 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -101,11 +101,14 @@
// Notifies hwcomposer about the start and end of animation
// This will be set to true during animation, otherwise false.
bool isDisplayAnimating;
+ ovutils::Dim roi;
+ bool secureUI; // Secure display layer
+ bool isSecurePresent;
};
struct LayerProp {
uint32_t mFlags; //qcom specific layer flags
- LayerProp():mFlags(0) {};
+ LayerProp():mFlags(0){};
};
struct VsyncState {
@@ -125,6 +128,11 @@
HWC_COPYBIT = 0x00000002,
};
+// HAL specific features
+enum {
+ HWC_FORMAT_RB_SWAP = 0x00000040,
+};
+
class LayerRotMap {
public:
LayerRotMap() { reset(); }
@@ -184,6 +192,14 @@
void dumpsys_log(android::String8& buf, const char* fmt, ...);
int getExtOrientation(hwc_context_t* ctx);
+bool isValidRect(const hwc_rect_t& rect);
+void deductRect(const hwc_layer_1_t* layer, hwc_rect_t& irect);
+hwc_rect_t getIntersection(const hwc_rect_t& rect1, const hwc_rect_t& rect2);
+hwc_rect_t getUnion(const hwc_rect_t& rect1, const hwc_rect_t& rect2);
+void optimizeLayerRects(hwc_context_t *ctx,
+ const hwc_display_contents_1_t *list, const int& dpy);
+bool areLayersIntersecting(const hwc_layer_1_t* layer1,
+ const hwc_layer_1_t* layer2);
/* Calculates the destination position based on the action safe rectangle */
void getActionSafePosition(hwc_context_t *ctx, int dpy, hwc_rect_t& dst);
@@ -296,6 +312,31 @@
return (hnd && (hnd->flags & private_handle_t::PRIV_FLAGS_EXTERNAL_CC));
}
+//Return true if the buffer is intended for Secure Display
+static inline bool isSecureDisplayBuffer(const private_handle_t* hnd) {
+ return (hnd && (hnd->flags & private_handle_t::PRIV_FLAGS_SECURE_DISPLAY));
+}
+
+static inline int getWidth(const private_handle_t* hnd) {
+ if(isYuvBuffer(hnd)) {
+ MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
+ if(metadata && metadata->operation & UPDATE_BUFFER_GEOMETRY) {
+ return metadata->bufferDim.sliceWidth;
+ }
+ }
+ return hnd->width;
+}
+
+static inline int getHeight(const private_handle_t* hnd) {
+ if(isYuvBuffer(hnd)) {
+ MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
+ if(metadata && metadata->operation & UPDATE_BUFFER_GEOMETRY) {
+ return metadata->bufferDim.sliceHeight;
+ }
+ }
+ return hnd->height;
+}
+
template<typename T> inline T max(T a, T b) { return (a > b) ? a : b; }
template<typename T> inline T min(T a, T b) { return (a < b) ? a : b; }
@@ -396,6 +437,15 @@
static inline bool isYuvPresent (hwc_context_t *ctx, int dpy) {
return ctx->listStats[dpy].yuvCount;
}
+
+static inline bool has90Transform(hwc_layer_1_t *layer) {
+ return (layer->transform & HWC_TRANSFORM_ROT_90);
+}
+
+inline bool isSecurePresent(hwc_context_t *ctx, int dpy) {
+ return ctx->listStats[dpy].isSecurePresent;
+}
+
};
#endif //HWC_UTILS_H
diff --git a/liboverlay/overlay.cpp b/liboverlay/overlay.cpp
index afe62e2..b095e9e 100644
--- a/liboverlay/overlay.cpp
+++ b/liboverlay/overlay.cpp
@@ -321,13 +321,23 @@
}
bool Overlay::displayCommit(const int& fd) {
+ utils::Dim roi;
+ return displayCommit(fd, roi);
+}
+
+bool Overlay::displayCommit(const int& fd, const utils::Dim& roi) {
//Commit
struct mdp_display_commit info;
memset(&info, 0, sizeof(struct mdp_display_commit));
info.flags = MDP_DISPLAY_COMMIT_OVERLAY;
+ info.roi.x = roi.x;
+ info.roi.y = roi.y;
+ info.roi.w = roi.w;
+ info.roi.h = roi.h;
+
if(!mdp_wrapper::displayCommit(fd, info)) {
- ALOGE("%s: commit failed", __func__);
- return false;
+ ALOGE("%s: commit failed", __func__);
+ return false;
}
return true;
}
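
A hedged usage sketch of the new ROI overload (the fd and ROI values are illustrative; the one-argument overload simply forwards a default Dim, which is assumed to be zero-initialized):

    // Illustrative only: commit just the updated region of a display.
    overlay::utils::Dim roi;
    roi.x = 0; roi.y = 0; roi.w = 256; roi.h = 256;  // dirty region, assumed
    // fd is assumed to be the framebuffer fd backing this display.
    if(!overlay::Overlay::displayCommit(fd, roi)) {
        ALOGE("ROI commit failed, retrying with a full-frame commit");
        overlay::Overlay::displayCommit(fd);  // zeroed ROI, as before this change
    }
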
diff --git a/liboverlay/overlay.h b/liboverlay/overlay.h
index 4b91038..c16f6e6 100644
--- a/liboverlay/overlay.h
+++ b/liboverlay/overlay.h
@@ -86,6 +86,8 @@
int availablePipes(int dpy, int mixer);
/* Returns available ("unallocated") pipes for a display */
int availablePipes(int dpy);
+ /* Returns available ("unallocated") pipe of given type for a display */
+ int availablePipes(int dpy, utils::eMdpPipeType type);
/* Returns if any of the requested pipe type is attached to any of the
* displays
*/
@@ -107,6 +109,7 @@
static int getDMAMode();
/* Returns the framebuffer node backing up the display */
static int getFbForDpy(const int& dpy);
+ static bool displayCommit(const int& fd, const utils::Dim& roi);
static bool displayCommit(const int& fd);
private:
@@ -215,6 +218,19 @@
return avail;
}
+inline int Overlay::availablePipes(int dpy, utils::eMdpPipeType type) {
+ int avail = 0;
+ for(int i = 0; i < PipeBook::NUM_PIPES; i++) {
+ if((mPipeBook[i].mDisplay == DPY_UNUSED ||
+ mPipeBook[i].mDisplay == dpy) &&
+ PipeBook::isNotAllocated(i) &&
+ type == PipeBook::getPipeType((utils::eDest)i)) {
+ avail++;
+ }
+ }
+ return avail;
+}
+
inline void Overlay::setDMAMode(const int& mode) {
if(mode == DMA_LINE_MODE || mode == DMA_BLOCK_MODE)
sDMAMode = mode;
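
A hedged usage sketch of the type-aware availablePipes() overload (ctx->mOverlay and the OV_MDP_PIPE_VG enumerator are assumed from the surrounding codebase):

    // Illustrative only: bail out of MDP composition for a video layer when no
    // VG pipe can be claimed for this display.
    overlay::Overlay *ov = ctx->mOverlay;
    if(ov->availablePipes(dpy, overlay::utils::OV_MDP_PIPE_VG) < 1) {
        return false;   // fall back to GPU/copybit composition
    }
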
diff --git a/liboverlay/overlayMdssRot.cpp b/liboverlay/overlayMdssRot.cpp
index 95f22c6..c6d5332 100644
--- a/liboverlay/overlayMdssRot.cpp
+++ b/liboverlay/overlayMdssRot.cpp
@@ -32,7 +32,6 @@
#define MDSS_MDP_ROT_ONLY 0x80
#endif
-#define SIZE_1M 0x00100000
#define MDSS_ROT_MASK (MDP_ROT_90 | MDP_FLIP_UD | MDP_FLIP_LR)
namespace ovutils = overlay::utils;
@@ -261,9 +260,6 @@
opBufSize = Rotator::calcOutputBufSize(destWhf);
}
- if (mRotInfo.flags & utils::OV_MDP_SECURE_OVERLAY_SESSION)
- opBufSize = utils::align(opBufSize, SIZE_1M);
-
return opBufSize;
}
diff --git a/liboverlay/overlayMem.h b/liboverlay/overlayMem.h
index 061d197..5e0db6f 100644
--- a/liboverlay/overlayMem.h
+++ b/liboverlay/overlayMem.h
@@ -38,6 +38,7 @@
#include "gralloc_priv.h"
#include "overlayUtils.h"
+#define SIZE_1M 0x00100000
namespace overlay {
@@ -118,24 +119,28 @@
{
alloc_data data;
int allocFlags = GRALLOC_USAGE_PRIVATE_IOMMU_HEAP;
- if(isSecure) {
- allocFlags = GRALLOC_USAGE_PRIVATE_MM_HEAP;
- allocFlags |= GRALLOC_USAGE_PROTECTED;
- }
- // Allocate uncached rotator buffers
- allocFlags |= GRALLOC_USAGE_PRIVATE_UNCACHED;
-
int err = 0;
OVASSERT(numbufs && bufSz, "numbufs=%d bufSz=%d", numbufs, bufSz);
- mBufSz = bufSz;
+ if(isSecure) {
+ allocFlags = GRALLOC_USAGE_PRIVATE_MM_HEAP;
+ allocFlags |= GRALLOC_USAGE_PROTECTED;
+ mBufSz = utils::align(bufSz, SIZE_1M);
+ data.align = SIZE_1M;
+ } else {
+ mBufSz = bufSz;
+ data.align = getpagesize();
+ }
+
+ // Allocate uncached rotator buffers
+ allocFlags |= GRALLOC_USAGE_PRIVATE_UNCACHED;
+
mNumBuffers = numbufs;
data.base = 0;
data.fd = -1;
data.offset = 0;
data.size = mBufSz * mNumBuffers;
- data.align = getpagesize();
data.uncached = true;
err = mAlloc->allocate(data, allocFlags);
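
The 1 MB alignment removed from overlayMdssRot.cpp above (and from overlayWriteback.cpp below) is now applied at allocation time instead, so both the per-buffer size (mBufSz) and the start address (data.align) honour the secure-heap requirement. For reference, utils::align() is assumed to round a size up to the next multiple of a power-of-two alignment, roughly:

    // Illustrative sketch of the assumed rounding behaviour of utils::align().
    static inline uint32_t alignUp(uint32_t size, uint32_t align) {
        return (size + align - 1) & ~(align - 1);   // align: power of two
    }
    // e.g. alignUp(0x0012C000 /* ~1.2 MB */, SIZE_1M) == 0x00200000 /* 2 MB */
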
diff --git a/liboverlay/overlayRotator.cpp b/liboverlay/overlayRotator.cpp
index 84a9818..7b3dda1 100644
--- a/liboverlay/overlayRotator.cpp
+++ b/liboverlay/overlayRotator.cpp
@@ -168,8 +168,8 @@
mRot[i]->getDump(buf, len);
}
}
- char str[32] = {'\0'};
- snprintf(str, 32, "\n================\n");
+ char str[4] = {'\0'};
+ snprintf(str, 4, "\n");
strncat(buf, str, strlen(str));
}
diff --git a/liboverlay/overlayUtils.cpp b/liboverlay/overlayUtils.cpp
index 927e7e9..1377182 100644
--- a/liboverlay/overlayUtils.cpp
+++ b/liboverlay/overlayUtils.cpp
@@ -100,6 +100,8 @@
return MDP_RGB_565;
case HAL_PIXEL_FORMAT_BGRA_8888:
return MDP_BGRA_8888;
+ case HAL_PIXEL_FORMAT_BGRX_8888:
+ return MDP_BGRX_8888;
case HAL_PIXEL_FORMAT_YV12:
return MDP_Y_CR_CB_GH2V2;
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
@@ -114,6 +116,10 @@
return MDP_Y_CBCR_H2V2;
case HAL_PIXEL_FORMAT_YCrCb_422_SP:
return MDP_Y_CRCB_H2V1;
+ case HAL_PIXEL_FORMAT_YCbCr_422_I:
+ return MDP_YCBYCR_H2V1;
+ case HAL_PIXEL_FORMAT_YCrCb_422_I:
+ return MDP_YCRYCB_H2V1;
case HAL_PIXEL_FORMAT_YCbCr_444_SP:
return MDP_Y_CBCR_H1V1;
case HAL_PIXEL_FORMAT_YCrCb_444_SP:
@@ -128,7 +134,6 @@
//---graphics.h--------
//HAL_PIXEL_FORMAT_RGBA_5551
//HAL_PIXEL_FORMAT_RGBA_4444
- //HAL_PIXEL_FORMAT_YCbCr_422_I
//---gralloc_priv.h-----
//HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO = 0x7FA30C01
//HAL_PIXEL_FORMAT_R_8 = 0x10D
@@ -169,7 +174,11 @@
return HAL_PIXEL_FORMAT_YCbCr_420_SP;
case MDP_Y_CRCB_H2V1:
return HAL_PIXEL_FORMAT_YCrCb_422_SP;
- case MDP_Y_CBCR_H1V1:
+ case MDP_YCBYCR_H2V1:
+ return HAL_PIXEL_FORMAT_YCbCr_422_I;
+ case MDP_YCRYCB_H2V1:
+ return HAL_PIXEL_FORMAT_YCrCb_422_I;
+ case MDP_Y_CBCR_H1V1:
return HAL_PIXEL_FORMAT_YCbCr_444_SP;
case MDP_Y_CRCB_H1V1:
return HAL_PIXEL_FORMAT_YCrCb_444_SP;
@@ -418,9 +427,8 @@
const msmfb_data& ov) {
char str_data[256] = {'\0'};
snprintf(str_data, 256,
- "%s offset=%d memid=%d id=%d flags=0x%x priv=%d\n",
- prefix, ov.offset, ov.memory_id, ov.id, ov.flags,
- ov.priv);
+ "%s offset=%d memid=%d id=%d flags=0x%x\n",
+ prefix, ov.offset, ov.memory_id, ov.id, ov.flags);
strncat(buf, str_data, strlen(str_data));
}
diff --git a/liboverlay/overlayUtils.h b/liboverlay/overlayUtils.h
index a9b7e6a..55f1767 100644
--- a/liboverlay/overlayUtils.h
+++ b/liboverlay/overlayUtils.h
@@ -261,6 +261,7 @@
OV_MDP_PIPE_FORCE_DMA = MDP_OV_PIPE_FORCE_DMA,
OV_MDP_DEINTERLACE = MDP_DEINTERLACE,
OV_MDP_SECURE_OVERLAY_SESSION = MDP_SECURE_OVERLAY_SESSION,
+ OV_MDP_SECURE_DISPLAY_OVERLAY_SESSION = MDP_SECURE_DISPLAY_OVERLAY_SESSION,
OV_MDP_SOURCE_ROTATED_90 = MDP_SOURCE_ROTATED_90,
OV_MDP_BACKEND_COMPOSITION = MDP_BACKEND_COMPOSITION,
OV_MDP_BLEND_FG_PREMULT = MDP_BLEND_FG_PREMULT,
@@ -488,6 +489,8 @@
case MDP_Y_CR_CB_H2V2:
case MDP_Y_CR_CB_GH2V2:
case MDP_Y_CBCR_H2V2_VENUS:
+ case MDP_YCBYCR_H2V1:
+ case MDP_YCRYCB_H2V1:
return true;
default:
return false;
@@ -518,6 +521,7 @@
formats[MDP_ARGB_8888] = STR(MDP_ARGB_8888);
formats[MDP_RGB_888] = STR(MDP_RGB_888);
formats[MDP_Y_CRCB_H2V2] = STR(MDP_Y_CRCB_H2V2);
+ formats[MDP_YCBYCR_H2V1] = STR(MDP_YCBYCR_H2V1);
formats[MDP_YCRYCB_H2V1] = STR(MDP_YCRYCB_H2V1);
formats[MDP_CBYCRY_H2V1] = STR(MDP_CBYCRY_H2V1);
formats[MDP_Y_CRCB_H2V1] = STR(MDP_Y_CRCB_H2V1);
diff --git a/liboverlay/overlayWriteback.cpp b/liboverlay/overlayWriteback.cpp
index f1f0eb5..e7d25b0 100644
--- a/liboverlay/overlayWriteback.cpp
+++ b/liboverlay/overlayWriteback.cpp
@@ -37,9 +37,6 @@
//=========== class WritebackMem ==============================================
bool WritebackMem::manageMem(uint32_t size, bool isSecure) {
- if(isSecure) {
- size = utils::align(size, SIZE_1M);
- }
if(mBuf.bufSz() == size) {
return true;
}
@@ -218,6 +215,13 @@
return mOpFmt;
}
+void Writeback::getDump(char *buf, size_t len) const {
+ utils::getDump(buf, len, "WBData", mFbData);
+ char str[4] = {'\0'};
+ snprintf(str, 4, "\n");
+ strncat(buf, str, strlen(str));
+}
+
//static
Writeback *Writeback::getInstance() {
diff --git a/liboverlay/overlayWriteback.h b/liboverlay/overlayWriteback.h
index 8c0c52a..33eb059 100644
--- a/liboverlay/overlayWriteback.h
+++ b/liboverlay/overlayWriteback.h
@@ -87,6 +87,7 @@
int getFbFd() const { return mFd.getFD(); }
int getOutputFormat();
bool setOutputFormat(int mdpFormat);
+ void getDump(char *buf, size_t len) const;
static Writeback* getInstance();
static void configBegin() { sUsed = false; }
diff --git a/libqdutils/mdp_version.cpp b/libqdutils/mdp_version.cpp
index 4a695d3..a444920 100644
--- a/libqdutils/mdp_version.cpp
+++ b/libqdutils/mdp_version.cpp
@@ -114,7 +114,7 @@
//TODO get this from driver
mMDPDownscale = 4;
- char split[64];
+ char split[64] = {0};
FILE* fp = fopen("/sys/class/graphics/fb0/msm_fb_split", "r");
if(fp){
//Format "left right" space as delimiter
diff --git a/libqdutils/qdMetaData.cpp b/libqdutils/qdMetaData.cpp
index 1316e59..89ca92e 100644
--- a/libqdutils/qdMetaData.cpp
+++ b/libqdutils/qdMetaData.cpp
@@ -78,6 +78,9 @@
case PP_PARAM_TIMESTAMP:
data->timestamp = *((int64_t *)param);
break;
+ case UPDATE_BUFFER_GEOMETRY:
+ memcpy((void *)&data->bufferDim, param, sizeof(BufferDim_t));
+ break;
default:
ALOGE("Unknown paramType %d", paramType);
break;
diff --git a/libqdutils/qdMetaData.h b/libqdutils/qdMetaData.h
index 9f10cf8..d5354a4 100644
--- a/libqdutils/qdMetaData.h
+++ b/libqdutils/qdMetaData.h
@@ -52,9 +52,15 @@
uint16_t c2[MAX_IGC_LUT_ENTRIES];
};
+struct BufferDim_t {
+ int32_t sliceWidth;
+ int32_t sliceHeight;
+};
+
struct MetaData_t {
int32_t operation;
int32_t interlaced;
+ BufferDim_t bufferDim;
HSICData_t hsicData;
int32_t sharpness;
int32_t video_interface;
@@ -70,7 +76,8 @@
PP_PARAM_VID_INTFC = 0x0008,
PP_PARAM_IGC = 0x0010,
PP_PARAM_SHARP2 = 0x0020,
- PP_PARAM_TIMESTAMP = 0x0040
+ PP_PARAM_TIMESTAMP = 0x0040,
+ UPDATE_BUFFER_GEOMETRY = 0x0080,
} DispParamType;
int setMetaData(private_handle_t *handle, DispParamType paramType, void *param);
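
A hedged producer-side sketch of the new metadata parameter (the handle name and dimensions are illustrative); the HWC side reads it back through the getWidth()/getHeight() helpers added to hwc_utils.h above:

    // Illustrative only: publish an updated slice geometry on an existing buffer.
    BufferDim_t dim;
    dim.sliceWidth  = 1280;
    dim.sliceHeight = 720;
    setMetaData(hnd, UPDATE_BUFFER_GEOMETRY, (void *)&dim);  // hnd: private_handle_t*
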
diff --git a/libvirtual/virtual.cpp b/libvirtual/virtual.cpp
index c94a5c6..342044c 100644
--- a/libvirtual/virtual.cpp
+++ b/libvirtual/virtual.cpp
@@ -55,6 +55,9 @@
#define MAX_SYSFS_FILE_PATH 255
+/* Max. resolution assignable to virtual display. */
+#define SUPPORTED_VIRTUAL_AREA (1920*1080)
+
int VirtualDisplay::configure() {
if(!openFrameBuffer())
return -1;
@@ -94,47 +97,85 @@
closeFrameBuffer();
}
+/* Initializes the resolution attributes of the virtual display
+ that are reported to SurfaceFlinger.
+ Cases:
+ 1. ONLINE event - initialize to frame buffer resolution
+ 2. RESUME event - retain original resolution
+*/
+void VirtualDisplay::initResolution(uint32_t &extW, uint32_t &extH) {
+ // On ONLINE event, display resolution attributes are 0.
+ if(extW == 0 || extH == 0){
+ extW = mVInfo.xres;
+ extH = mVInfo.yres;
+ }
+}
+
+/* Sets the virtual resolution to match that of the primary
+ display in the event that the virtual display currently
+ connected has a lower resolution. NB: we always report the
+ highest available resolution to SurfaceFlinger.
+*/
+void VirtualDisplay::setToPrimary(uint32_t maxArea,
+ uint32_t priW,
+ uint32_t priH,
+ uint32_t &extW,
+ uint32_t &extH) {
+ // E.g., with the primary at 1600p and WFD at 1080p
+ // we won't use the downscale feature, because the max MDP
+ // writeback resolution supported is 1080p (tracked
+ // by SUPPORTED_VIRTUAL_AREA).
+ if((maxArea == (priW * priH))
+ && (maxArea <= SUPPORTED_VIRTUAL_AREA)) {
+ extW = priW;
+ extH = priH;
+ // If WFD is in landscape, assign the higher dimension
+ // to WFD's xres.
+ if(priH > priW) {
+ extW = priH;
+ extH = priW;
+ }
+ }
+}
+
+/* Sets the External Display MDP downscale mode indicator. Only set to
+ TRUE in the following scenarios:
+ 1. Valid DRC scenarios, i.e. when the original WFD resolution
+ is greater than the new/requested resolution in mVInfo.
+ 2. WFD downscale path, i.e. when the WFD resolution is lower than
+ the primary resolution.
+ Furthermore, downscale mode is only valid when downscaling from
+ SUPPORTED_VIRTUAL_AREA to a lower resolution.
+ (SUPPORTED_VIRTUAL_AREA represents the maximum resolution that
+ we can configure to the virtual display.)
+*/
+void VirtualDisplay::setDownScaleMode(uint32_t maxArea) {
+ if((maxArea > (mVInfo.xres * mVInfo.yres))
+ && (maxArea <= SUPPORTED_VIRTUAL_AREA)) {
+ mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].mDownScaleMode = true;
+ } else {
+ mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].mDownScaleMode = false;
+ }
+}
+
void VirtualDisplay::setAttributes() {
if(mHwcContext) {
- unsigned int &w = mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].xres;
- unsigned int &h = mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].yres;
+ uint32_t &extW = mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].xres;
+ uint32_t &extH = mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].yres;
+ uint32_t priW = mHwcContext->dpyAttr[HWC_DISPLAY_PRIMARY].xres;
+ uint32_t priH = mHwcContext->dpyAttr[HWC_DISPLAY_PRIMARY].yres;
- // Always set dpyAttr res to mVInfo res, only on an ONLINE event. Keep
- // the original configuration to cater for DRC initiated RESUME events
- if(w == 0 || h == 0){
- w = mVInfo.xres;
- h = mVInfo.yres;
- }
- mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].mDownScaleMode = false;
+ initResolution(extW, extH);
if(!qdutils::MDPVersion::getInstance().is8x26()) {
- uint32_t priW = mHwcContext->dpyAttr[HWC_DISPLAY_PRIMARY].xres;
- uint32_t priH = mHwcContext->dpyAttr[HWC_DISPLAY_PRIMARY].yres;
- // Find the maximum resolution between primary and virtual
- uint32_t maxArea = max((w * h), (priW * priH));
+ // maxArea represents the maximum resolution between
+ // primary and virtual display.
+ uint32_t maxArea = max((extW * extH), (priW * priH));
- // If primary resolution is more than the wfd resolution
- // configure dpy attr to primary resolution and set
- // downscale mode.
- // DRC is only valid when the original resolution on the WiFi
- // display is greater than the new resolution in mVInfo.
- if(maxArea > (mVInfo.xres * mVInfo.yres)) {
- if(maxArea == (priW * priH)) {
- // Here we account for the case when primary resolution is
- // greater than that of the WiFi display
- w = priW;
- h = priH;
- // WFD is always in landscape, so always assign the higher
- // dimension to wfd's xres
- if(priH > priW) {
- w = priH;
- h = priW;
- }
- }
- // Set External Display MDP Downscale mode indicator
- mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].mDownScaleMode = true;
- }
+ setToPrimary(maxArea, priW, priH, extW, extH);
+
+ setDownScaleMode(maxArea);
}
mHwcContext->dpyAttr[HWC_DISPLAY_VIRTUAL].vsync_period =
1000000000l /60;
diff --git a/libvirtual/virtual.h b/libvirtual/virtual.h
index 8003e23..a6aec40 100644
--- a/libvirtual/virtual.h
+++ b/libvirtual/virtual.h
@@ -52,6 +52,10 @@
bool openFrameBuffer();
bool closeFrameBuffer();
void setAttributes();
+ void initResolution(uint32_t &extW, uint32_t &extH);
+ void setToPrimary(uint32_t maxArea, uint32_t priW, uint32_t priH,
+ uint32_t &extW, uint32_t &extH);
+ void setDownScaleMode(uint32_t maxArea);
int mFd;
hwc_context_t *mHwcContext;
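
To make the refactored setAttributes() flow concrete, a worked example with assumed numbers (primary panel 1920x1080, WFD sink negotiated at 1280x720, ONLINE event so the virtual display attributes start at zero):

    initResolution():   extW/extH are 0 -> take mVInfo, i.e. 1280x720
    maxArea:            max(1280*720, 1920*1080) = 2073600 = priW*priH
    setToPrimary():     maxArea == priW*priH and maxArea <= SUPPORTED_VIRTUAL_AREA,
                        so extW/extH become 1920x1080 (no swap, since priW > priH)
    setDownScaleMode(): maxArea (2073600) > mVInfo area (921600) and
                        maxArea <= SUPPORTED_VIRTUAL_AREA -> mDownScaleMode = true

SurfaceFlinger is therefore given a 1920x1080 virtual display, and the downscale-mode flag signals that the MDP writeback output is expected to be scaled down to the sink's 1280x720.
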