Merge "HWC: Fix for use int reference in function call"
diff --git a/libcopybit/copybit.cpp b/libcopybit/copybit.cpp
index 5d8da05..9558f69 100644
--- a/libcopybit/copybit.cpp
+++ b/libcopybit/copybit.cpp
@@ -637,6 +637,7 @@
return -EINVAL;
}
+ int status = 0;
struct blitReq* list = &ctx->list;
mdp_blit_req* req = &list->req[list->count++];
set_infos(ctx, req, MDP_SOLID_FILL);
@@ -654,7 +655,11 @@
req->const_color.b = (uint32_t)((color >> 16) & 0xff);
req->const_color.alpha = (uint32_t)((color >> 24) & 0xff);
- int status = msm_copybit(ctx, list);
+ if (list->count == sizeof(list->req)/sizeof(list->req[0])) {
+ status = msm_copybit(ctx, list);
+ list->sync.acq_fen_fd_cnt = 0;
+ list->count = 0;
+ }
return status;
}
@@ -714,6 +719,10 @@
ctx->mAlpha = MDP_ALPHA_NOP;
ctx->mFlags = 0;
ctx->sync.flags = 0;
+ ctx->relFence = -1;
+ for (int i=0; i < MDP_MAX_FENCE_FD; i++) {
+ ctx->acqFence[i] = -1;
+ }
ctx->sync.acq_fen_fd = ctx->acqFence;
ctx->sync.rel_fen_fd = &ctx->relFence;
ctx->list.count = 0;
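Note on the copybit.cpp change above: fill_color() now queues solid-fill requests and only issues the blit call once the fixed-size request list is full, resetting the request count and acquire-fence count after each flush, while the second hunk seeds the acquire/release fence fds with -1. A minimal standalone sketch of that batch-and-flush pattern, with an illustrative class name, list size and a submit() stub standing in for the msm_copybit ioctl:

```cpp
#include <array>
#include <cstdio>

struct BlitRequest { int id; };

class BlitBatch {
public:
    // Queue one request; flush automatically once the list is full,
    // mirroring the list->count == sizeof(req)/sizeof(req[0]) check above.
    int queue(const BlitRequest& req) {
        mReqs[mCount++] = req;
        int status = 0;
        if (mCount == mReqs.size()) {
            status = submit();      // stands in for msm_copybit()
            mAcqFenceCount = 0;     // acquire fences are consumed by the flush
            mCount = 0;
        }
        return status;
    }

private:
    int submit() {
        std::printf("submitting %zu queued blits\n", mCount);
        return 0;
    }
    std::array<BlitRequest, 10> mReqs{};
    size_t mCount = 0;
    size_t mAcqFenceCount = 0;
};

int main() {
    BlitBatch batch;
    for (int i = 0; i < 25; ++i)
        batch.queue({i});           // flushes after every 10th request
    return 0;
}
```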
diff --git a/libexternal/external.cpp b/libexternal/external.cpp
index 045edd8..d905e0d 100644
--- a/libexternal/external.cpp
+++ b/libexternal/external.cpp
@@ -42,6 +42,9 @@
#define MAX_SYSFS_FILE_PATH 255
#define UNKNOWN_STRING "unknown"
#define SPD_NAME_LENGTH 16
+/* Max. area (in pixels) assignable to the external display in downscale mode */
+#define SUPPORTED_DOWNSCALE_EXT_AREA (1920*1080)
+
int ExternalDisplay::configure() {
if(!openFrameBuffer()) {
@@ -590,7 +593,7 @@
// downscale mode
// Restrict this up to 1080p resolution max
if(((priW * priH) > (width * height)) &&
- (priW <= qdutils::MAX_DISPLAY_DIM )) {
+ ((priW * priH) <= SUPPORTED_DOWNSCALE_EXT_AREA)) {
mHwcContext->dpyAttr[HWC_DISPLAY_EXTERNAL].xres = priW;
mHwcContext->dpyAttr[HWC_DISPLAY_EXTERNAL].yres = priH;
// HDMI is always in landscape, so always assign the higher
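For reference, the condition above now compares areas instead of only the primary width: the external display adopts the primary resolution in downscale mode only when the primary area exceeds the external mode's area yet stays within a 1080p-sized budget. A small sketch of that predicate; only SUPPORTED_DOWNSCALE_EXT_AREA is taken from the diff, the helper name is illustrative:

```cpp
#include <cstdint>

constexpr uint32_t SUPPORTED_DOWNSCALE_EXT_AREA = 1920 * 1080;

// True when the primary resolution may be assigned to the external
// display for downscale mode.
bool canDownscaleToPrimary(uint32_t priW, uint32_t priH,
                           uint32_t extW, uint32_t extH) {
    const uint32_t priArea = priW * priH;
    return (priArea > extW * extH) &&
           (priArea <= SUPPORTED_DOWNSCALE_EXT_AREA);
}
```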
diff --git a/libhwcomposer/hwc.cpp b/libhwcomposer/hwc.cpp
index d86be3e..a805486 100644
--- a/libhwcomposer/hwc.cpp
+++ b/libhwcomposer/hwc.cpp
@@ -138,15 +138,6 @@
ctx->layerProp[dpy] = new LayerProp[numAppLayers];
}
-static void handleGeomChange(hwc_context_t *ctx, int dpy,
- hwc_display_contents_1_t *list) {
- /* No point to calling overlay_set on MDP3 */
- if(list->flags & HWC_GEOMETRY_CHANGED &&
- ctx->mMDP.version >= qdutils::MDP_V4_0) {
- ctx->mOverlay->forceSet(dpy);
- }
-}
-
static int hwc_prepare_primary(hwc_composer_device_1 *dev,
hwc_display_contents_1_t *list) {
ATRACE_CALL();
@@ -155,22 +146,17 @@
if (LIKELY(list && list->numHwLayers > 1) &&
ctx->dpyAttr[dpy].isActive) {
reset_layer_prop(ctx, dpy, list->numHwLayers - 1);
- handleGeomChange(ctx, dpy, list);
- uint32_t last = list->numHwLayers - 1;
- hwc_layer_1_t *fbLayer = &list->hwLayers[last];
- if(fbLayer->handle) {
- setListStats(ctx, list, dpy);
+ setListStats(ctx, list, dpy);
#ifdef VPU_TARGET
- ctx->mVPUClient->prepare(ctx, list);
+ ctx->mVPUClient->prepare(ctx, list);
#endif
- if(ctx->mMDPComp[dpy]->prepare(ctx, list) < 0) {
- const int fbZ = 0;
- ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZ);
- }
- if (ctx->mMDP.version < qdutils::MDP_V4_0) {
- if(ctx->mCopyBit[dpy])
- ctx->mCopyBit[dpy]->prepare(ctx, list, dpy);
- }
+ if(ctx->mMDPComp[dpy]->prepare(ctx, list) < 0) {
+ const int fbZ = 0;
+ ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZ);
+ }
+ if (ctx->mMDP.version < qdutils::MDP_V4_0) {
+ if(ctx->mCopyBit[dpy])
+ ctx->mCopyBit[dpy]->prepare(ctx, list, dpy);
}
}
return 0;
@@ -186,25 +172,20 @@
ctx->dpyAttr[dpy].isActive &&
ctx->dpyAttr[dpy].connected) {
reset_layer_prop(ctx, dpy, list->numHwLayers - 1);
- handleGeomChange(ctx, dpy, list);
- uint32_t last = list->numHwLayers - 1;
- hwc_layer_1_t *fbLayer = &list->hwLayers[last];
if(!ctx->dpyAttr[dpy].isPause) {
- if(fbLayer->handle) {
- ctx->dpyAttr[dpy].isConfiguring = false;
- setListStats(ctx, list, dpy);
- if(ctx->mMDPComp[dpy]->prepare(ctx, list) < 0) {
- const int fbZ = 0;
- ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZ);
- }
+ ctx->dpyAttr[dpy].isConfiguring = false;
+ setListStats(ctx, list, dpy);
+ if(ctx->mMDPComp[dpy]->prepare(ctx, list) < 0) {
+ const int fbZ = 0;
+ ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZ);
+ }
- if(ctx->listStats[dpy].isDisplayAnimating) {
- // Mark all app layers as HWC_OVERLAY for external during
- // animation, so that SF doesnt draw it on FB
- for(int i = 0 ;i < ctx->listStats[dpy].numAppLayers; i++) {
- hwc_layer_1_t *layer = &list->hwLayers[i];
- layer->compositionType = HWC_OVERLAY;
- }
+ if(ctx->listStats[dpy].isDisplayAnimating) {
+ // Mark all app layers as HWC_OVERLAY for external during
+                // animation, so that SF doesn't draw them on the FB
+ for(int i = 0 ;i < ctx->listStats[dpy].numAppLayers; i++) {
+ hwc_layer_1_t *layer = &list->hwLayers[i];
+ layer->compositionType = HWC_OVERLAY;
}
}
} else {
@@ -229,25 +210,20 @@
ctx->dpyAttr[dpy].isActive &&
ctx->dpyAttr[dpy].connected) {
reset_layer_prop(ctx, dpy, list->numHwLayers - 1);
- handleGeomChange(ctx, dpy, list);
- uint32_t last = list->numHwLayers - 1;
- hwc_layer_1_t *fbLayer = &list->hwLayers[last];
if(!ctx->dpyAttr[dpy].isPause) {
- if(fbLayer->handle) {
- ctx->dpyAttr[dpy].isConfiguring = false;
- setListStats(ctx, list, dpy);
- if(ctx->mMDPComp[dpy]->prepare(ctx, list) < 0) {
- const int fbZ = 0;
- ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZ);
- }
+ ctx->dpyAttr[dpy].isConfiguring = false;
+ setListStats(ctx, list, dpy);
+ if(ctx->mMDPComp[dpy]->prepare(ctx, list) < 0) {
+ const int fbZ = 0;
+ ctx->mFBUpdate[dpy]->prepare(ctx, list, fbZ);
+ }
- if(ctx->listStats[dpy].isDisplayAnimating) {
- // Mark all app layers as HWC_OVERLAY for virtual during
- // animation, so that SF doesnt draw it on FB
- for(int i = 0 ;i < ctx->listStats[dpy].numAppLayers; i++) {
- hwc_layer_1_t *layer = &list->hwLayers[i];
- layer->compositionType = HWC_OVERLAY;
- }
+ if(ctx->listStats[dpy].isDisplayAnimating) {
+ // Mark all app layers as HWC_OVERLAY for virtual during
+                // animation, so that SF doesn't draw them on the FB
+ for(int i = 0 ;i < ctx->listStats[dpy].numAppLayers; i++) {
+ hwc_layer_1_t *layer = &list->hwLayers[i];
+ layer->compositionType = HWC_OVERLAY;
}
}
} else {
diff --git a/libhwcomposer/hwc_copybit.cpp b/libhwcomposer/hwc_copybit.cpp
index bb1b032..4695a4f 100644
--- a/libhwcomposer/hwc_copybit.cpp
+++ b/libhwcomposer/hwc_copybit.cpp
@@ -164,8 +164,6 @@
LayerProp *layerProp = ctx->layerProp[dpy];
size_t fbLayerIndex = ctx->listStats[dpy].fbLayerIndex;
hwc_layer_1_t *fbLayer = &list->hwLayers[fbLayerIndex];
- private_handle_t *fbHnd = (private_handle_t *)fbLayer->handle;
-
// Following are MDP3 limitations for which we
// need to fall back to GPU composition:
@@ -178,13 +176,14 @@
hwc_layer_1_t *layer = (hwc_layer_1_t *) &list->hwLayers[i];
if (layer->planeAlpha != 0xFF)
return true;
+ hwc_rect_t sourceCrop = integerizeSourceCrop(layer->sourceCropf);
if (layer->transform & HAL_TRANSFORM_ROT_90) {
- src_h = layer->sourceCrop.right - layer->sourceCrop.left;
- src_w = layer->sourceCrop.bottom - layer->sourceCrop.top;
+ src_h = sourceCrop.right - sourceCrop.left;
+ src_w = sourceCrop.bottom - sourceCrop.top;
} else {
- src_h = layer->sourceCrop.bottom - layer->sourceCrop.top;
- src_w = layer->sourceCrop.right - layer->sourceCrop.left;
+ src_h = sourceCrop.bottom - sourceCrop.top;
+ src_w = sourceCrop.right - sourceCrop.left;
}
dst_h = layer->displayFrame.bottom - layer->displayFrame.top;
dst_w = layer->displayFrame.right - layer->displayFrame.left;
@@ -201,9 +200,9 @@
//Allocate render buffers if they're not allocated
if (useCopybitForYUV || useCopybitForRGB) {
- int ret = allocRenderBuffers(fbHnd->width,
- fbHnd->height,
- fbHnd->format);
+ int ret = allocRenderBuffers(mAlignedFBWidth,
+ mAlignedFBHeight,
+ HAL_PIXEL_FORMAT_RGBA_8888);
if (ret < 0) {
return false;
} else {
@@ -317,9 +316,10 @@
copybit_device_t *copybit = getCopyBitDevice();
// Async mode
copybit->flush_get_fence(copybit, fd);
- if(mRelFd[0] >=0 && ctx->mMDP.version == qdutils::MDP_V3_0_4) {
- close(mRelFd[0]);
- mRelFd[0] = -1;
+ if(mRelFd[mCurRenderBufferIndex] >=0 &&
+ ctx->mMDP.version == qdutils::MDP_V3_0_4) {
+ close(mRelFd[mCurRenderBufferIndex]);
+ mRelFd[mCurRenderBufferIndex] = -1;
}
}
return true;
@@ -427,8 +427,7 @@
float copybitsMinScale =
(float)copybit->get(copybit,COPYBIT_MINIFICATION_LIMIT);
- if((layer->transform == HWC_TRANSFORM_ROT_90) ||
- (layer->transform == HWC_TRANSFORM_ROT_270)) {
+ if (layer->transform & HWC_TRANSFORM_ROT_90) {
//swap screen width and height
int tmp = screen_w;
screen_w = screen_h;
@@ -618,6 +617,7 @@
copybit->set_parameter(copybit, COPYBIT_DITHER,
(dst.format == HAL_PIXEL_FORMAT_RGB_565) ?
COPYBIT_ENABLE : COPYBIT_DISABLE);
+ copybit->set_parameter(copybit, COPYBIT_TRANSFORM, 0);
copybit->set_parameter(copybit, COPYBIT_BLEND_MODE, layer->blending);
copybit->set_parameter(copybit, COPYBIT_PLANE_ALPHA, layer->planeAlpha);
copybit->set_parameter(copybit, COPYBIT_BLIT_TO_FRAMEBUFFER,COPYBIT_ENABLE);
@@ -695,8 +695,15 @@
return mEngine;
}
-CopyBit::CopyBit():mIsModeOn(false), mCopyBitDraw(false),
- mCurRenderBufferIndex(0){
+CopyBit::CopyBit(hwc_context_t *ctx, const int& dpy) : mIsModeOn(false),
+ mCopyBitDraw(false), mCurRenderBufferIndex(0) {
+
+ getBufferSizeAndDimensions(ctx->dpyAttr[dpy].xres,
+ ctx->dpyAttr[dpy].yres,
+ HAL_PIXEL_FORMAT_RGBA_8888,
+ mAlignedFBWidth,
+ mAlignedFBHeight);
+
hw_module_t const *module;
for (int i = 0; i < NUM_RENDER_BUFFERS; i++) {
mRenderBuffer[i] = NULL;
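One of the hwc_copybit.cpp hunks above switches from the legacy integer sourceCrop to integerizeSourceCrop(layer->sourceCropf) and swaps the crop's width and height under a 90-degree transform before checking the scaling limits. A simplified sketch of that rotation-aware source-size computation; Rect is a stand-in for hwc_rect_t and the transform constant is assumed to match the Android HAL value:

```cpp
struct Rect { int left, top, right, bottom; };

constexpr int kTransformRot90 = 0x4;   // assumed value of HAL_TRANSFORM_ROT_90

// Compute the effective source width/height of a layer's crop, swapping the
// two when the layer is rotated by 90 degrees, as in the diff above.
void sourceSize(const Rect& crop, int transform, int& srcW, int& srcH) {
    if (transform & kTransformRot90) {
        srcH = crop.right - crop.left;    // width becomes height post-rotation
        srcW = crop.bottom - crop.top;
    } else {
        srcH = crop.bottom - crop.top;
        srcW = crop.right - crop.left;
    }
}
```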
diff --git a/libhwcomposer/hwc_copybit.h b/libhwcomposer/hwc_copybit.h
index fd5c939..4d8123c 100644
--- a/libhwcomposer/hwc_copybit.h
+++ b/libhwcomposer/hwc_copybit.h
@@ -32,7 +32,7 @@
class CopyBit {
public:
- CopyBit();
+ CopyBit(hwc_context_t *ctx, const int& dpy);
~CopyBit();
// API to get copybit engine(non static)
struct copybit_device_t *getCopyBitDevice();
@@ -89,6 +89,8 @@
//Dynamic composition threshold for deciding copybit usage.
double mDynThreshold;
+ int mAlignedFBWidth;
+ int mAlignedFBHeight;
};
}; //namespace qhwc
diff --git a/libhwcomposer/hwc_fbupdate.cpp b/libhwcomposer/hwc_fbupdate.cpp
index ce18695..bb9adbf 100644
--- a/libhwcomposer/hwc_fbupdate.cpp
+++ b/libhwcomposer/hwc_fbupdate.cpp
@@ -39,20 +39,29 @@
IFBUpdate* IFBUpdate::getObject(hwc_context_t *ctx, const int& dpy) {
if(isDisplaySplit(ctx, dpy)) {
- return new FBUpdateSplit(dpy);
+ return new FBUpdateSplit(ctx, dpy);
}
- return new FBUpdateNonSplit(dpy);
+ return new FBUpdateNonSplit(ctx, dpy);
}
-inline void IFBUpdate::reset() {
+IFBUpdate::IFBUpdate(hwc_context_t *ctx, const int& dpy) : mDpy(dpy) {
+ getBufferSizeAndDimensions(ctx->dpyAttr[dpy].xres,
+ ctx->dpyAttr[dpy].yres,
+ HAL_PIXEL_FORMAT_RGBA_8888,
+ mAlignedFBWidth,
+ mAlignedFBHeight);
+}
+
+void IFBUpdate::reset() {
mModeOn = false;
mRot = NULL;
}
//================= Low res====================================
-FBUpdateNonSplit::FBUpdateNonSplit(const int& dpy): IFBUpdate(dpy) {}
+FBUpdateNonSplit::FBUpdateNonSplit(hwc_context_t *ctx, const int& dpy):
+ IFBUpdate(ctx, dpy) {}
-inline void FBUpdateNonSplit::reset() {
+void FBUpdateNonSplit::reset() {
IFBUpdate::reset();
mDest = ovutils::OV_INVALID;
}
@@ -107,9 +116,10 @@
layer->compositionType = HWC_OVERLAY;
}
overlay::Overlay& ov = *(ctx->mOverlay);
- private_handle_t *hnd = (private_handle_t *)layer->handle;
- ovutils::Whf info(getWidth(hnd), getHeight(hnd),
- ovutils::getMdpFormat(hnd->format), hnd->size);
+
+ ovutils::Whf info(mAlignedFBWidth,
+ mAlignedFBHeight,
+ ovutils::getMdpFormat(HAL_PIXEL_FORMAT_RGBA_8888));
//Request a pipe
ovutils::eMdpPipeType type = ovutils::OV_MDP_PIPE_ANY;
@@ -160,7 +170,7 @@
displayFrame = sourceCrop;
}
}
- calcExtDisplayPosition(ctx, hnd, mDpy, sourceCrop, displayFrame,
+ calcExtDisplayPosition(ctx, NULL, mDpy, sourceCrop, displayFrame,
transform, orient);
setMdpFlags(layer, mdpFlags, 0, transform);
// For External use rotator if there is a rotation value set
@@ -168,6 +178,8 @@
sourceCrop, mdpFlags, rotFlags);
if(!ret) {
ALOGE("%s: preRotate for external Failed!", __FUNCTION__);
+ ctx->mOverlay->clear(mDpy);
+ ctx->mLayerRotMap[mDpy]->clear();
return false;
}
//For the mdp, since either we are pre-rotating or MDP does flips
@@ -182,6 +194,7 @@
if(configMdp(ctx->mOverlay, parg, orient, sourceCrop, displayFrame,
NULL, mDest) < 0) {
ALOGE("%s: configMdp failed for dpy %d", __FUNCTION__, mDpy);
+ ctx->mLayerRotMap[mDpy]->clear();
ret = false;
}
}
@@ -212,9 +225,10 @@
}
//================= High res====================================
-FBUpdateSplit::FBUpdateSplit(const int& dpy): IFBUpdate(dpy) {}
+FBUpdateSplit::FBUpdateSplit(hwc_context_t *ctx, const int& dpy):
+ IFBUpdate(ctx, dpy) {}
-inline void FBUpdateSplit::reset() {
+void FBUpdateSplit::reset() {
IFBUpdate::reset();
mDestLeft = ovutils::OV_INVALID;
mDestRight = ovutils::OV_INVALID;
@@ -246,9 +260,10 @@
layer->compositionType = HWC_OVERLAY;
}
overlay::Overlay& ov = *(ctx->mOverlay);
- private_handle_t *hnd = (private_handle_t *)layer->handle;
- ovutils::Whf info(getWidth(hnd), getHeight(hnd),
- ovutils::getMdpFormat(hnd->format), hnd->size);
+
+ ovutils::Whf info(mAlignedFBWidth,
+ mAlignedFBHeight,
+ ovutils::getMdpFormat(HAL_PIXEL_FORMAT_RGBA_8888));
//Request left pipe
ovutils::eDest destL = ov.nextPipe(ovutils::OV_MDP_PIPE_ANY, mDpy,
@@ -353,6 +368,9 @@
ALOGE("%s: commit fails for right", __FUNCTION__);
ret = false;
}
+ if(ret == false) {
+ ctx->mLayerRotMap[mDpy]->clear();
+ }
}
return ret;
}
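Both FBUpdate constructors above now call getBufferSizeAndDimensions() once and cache aligned framebuffer dimensions, instead of reading width/height/format from a possibly-null FB layer handle during prepare. A rough sketch of why aligned values are cached; the 32-pixel alignment and the FBGeometry type are assumptions for illustration only, the real alignment comes from gralloc via getBufferSizeAndDimensions():

```cpp
// Gralloc typically pads buffer dimensions/strides, so the overlay must be
// programmed with the allocated (aligned) size rather than the raw panel
// resolution. Power-of-two alignment assumed for this sketch.
constexpr int kAssumedAlign = 32;

constexpr int alignUp(int value, int align) {
    return (value + align - 1) & ~(align - 1);
}

struct FBGeometry {
    int alignedW;
    int alignedH;
    FBGeometry(int xres, int yres)
        : alignedW(alignUp(xres, kAssumedAlign)),
          alignedH(alignUp(yres, kAssumedAlign)) {}
};
```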
diff --git a/libhwcomposer/hwc_fbupdate.h b/libhwcomposer/hwc_fbupdate.h
index ce6af63..355e429 100644
--- a/libhwcomposer/hwc_fbupdate.h
+++ b/libhwcomposer/hwc_fbupdate.h
@@ -35,7 +35,7 @@
//Framebuffer update Interface
class IFBUpdate {
public:
- explicit IFBUpdate(const int& dpy) : mDpy(dpy) {}
+ explicit IFBUpdate(hwc_context_t *ctx, const int& dpy);
virtual ~IFBUpdate() {};
// Sets up members and prepares overlay if conditions are met
virtual bool prepare(hwc_context_t *ctx, hwc_display_contents_1 *list,
@@ -51,12 +51,14 @@
const int mDpy; // display to update
bool mModeOn; // if prepare happened
overlay::Rotator *mRot;
+ int mAlignedFBWidth;
+ int mAlignedFBHeight;
};
//Non-Split panel handler.
class FBUpdateNonSplit : public IFBUpdate {
public:
- explicit FBUpdateNonSplit(const int& dpy);
+ explicit FBUpdateNonSplit(hwc_context_t *ctx, const int& dpy);
virtual ~FBUpdateNonSplit() {};
bool prepare(hwc_context_t *ctx, hwc_display_contents_1 *list,
int fbZorder);
@@ -77,7 +79,7 @@
//Split panel handler.
class FBUpdateSplit : public IFBUpdate {
public:
- explicit FBUpdateSplit(const int& dpy);
+ explicit FBUpdateSplit(hwc_context_t *ctx, const int& dpy);
virtual ~FBUpdateSplit() {};
bool prepare(hwc_context_t *ctx, hwc_display_contents_1 *list,
int fbZorder);
diff --git a/libhwcomposer/hwc_mdpcomp.cpp b/libhwcomposer/hwc_mdpcomp.cpp
index 7d4fa3e..b4192b6 100644
--- a/libhwcomposer/hwc_mdpcomp.cpp
+++ b/libhwcomposer/hwc_mdpcomp.cpp
@@ -43,8 +43,9 @@
bool MDPComp::sEnableMixedMode = true;
bool MDPComp::sEnablePartialFrameUpdate = false;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
-float MDPComp::sMaxBw = 2.3f;
+double MDPComp::sMaxBw = 0.0;
double MDPComp::sBwClaimed = 0.0;
+bool MDPComp::sEnable4k2kYUVSplit = false;
MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
if(isDisplaySplit(ctx, dpy)) {
@@ -128,13 +129,6 @@
sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
}
- if(property_get("debug.mdpcomp.bw", property, "0") > 0) {
- float val = atof(property);
- if(val > 0.0f) {
- sMaxBw = val;
- }
- }
-
if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
// Idle invalidation is not necessary on command mode panels
long idle_timeout = DEFAULT_IDLE_TIME;
@@ -154,6 +148,12 @@
idleInvalidator->init(timeout_handler, ctx, idle_timeout);
}
}
+
+ if((property_get("debug.mdpcomp.4k2kSplit", property, "0") > 0) &&
+ (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
+ (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
+ sEnable4k2kYUVSplit = true;
+ }
return true;
}
@@ -449,7 +449,7 @@
/* Reset frame ROI when any layer which needs scaling also needs ROI
* cropping */
if((res_w != dst_w || res_h != dst_h) &&
- needsScaling (ctx, layer, mDpy)) {
+ needsScaling (layer)) {
ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
mCurrentFrame.dropCount = 0;
@@ -530,12 +530,6 @@
return false;
}
- if(ctx->listStats[mDpy].needsAlphaScale
- && ctx->mMDP.version < qdutils::MDSS_V5) {
- ALOGD_IF(isDebug(), "%s: frame needs alpha downscaling",__FUNCTION__);
- return false;
- }
-
for(int i = 0; i < numAppLayers; ++i) {
hwc_layer_1_t* layer = &list->hwLayers[i];
private_handle_t *hnd = (private_handle_t *)layer->handle;
@@ -568,6 +562,12 @@
} else if(partialMDPComp(ctx, list)) {
ret = true;
}
+
+ if(!hwLimitationsCheck(ctx, list)) {
+ ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
+ return false;
+ }
+
return ret;
}
@@ -586,6 +586,15 @@
ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
return false;
}
+
+    //On 8x26, if only a secondary-display layer needs scaling while the
+    //primary needs none, the DMA pipe is already occupied by the primary.
+    //Falling back to GLES composition then leaves the virtual display
+    //without a DMA pipe, and an error is reported.
+ if(qdutils::MDPVersion::getInstance().is8x26() &&
+ mDpy >= HWC_DISPLAY_EXTERNAL &&
+ qhwc::needsScaling(layer))
+ return false;
}
mCurrentFrame.fbCount = 0;
mCurrentFrame.fbZ = -1;
@@ -594,6 +603,10 @@
mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
mCurrentFrame.dropCount;
+ if(sEnable4k2kYUVSplit){
+ modifymdpCountfor4k2k(ctx, list);
+ }
+
if(!resourceCheck(ctx, list)) {
ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
return false;
@@ -649,6 +662,10 @@
int mdpCount = mCurrentFrame.mdpCount;
+ if(sEnable4k2kYUVSplit){
+ modifymdpCountfor4k2k(ctx, list);
+ }
+
//Will benefit cases where a video has non-updating background.
if((mDpy > HWC_DISPLAY_PRIMARY) and
(mdpCount > MAX_SEC_LAYERS)) {
@@ -719,6 +736,10 @@
mCurrentFrame.fbCount = batchSize;
mCurrentFrame.mdpCount = mCurrentFrame.layerCount - batchSize;
+ if(sEnable4k2kYUVSplit){
+ modifymdpCountfor4k2k(ctx, list);
+ }
+
if(!resourceCheck(ctx, list)) {
ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
return false;
@@ -770,6 +791,10 @@
mCurrentFrame.fbCount = fbBatchSize;
mCurrentFrame.mdpCount = mCurrentFrame.layerCount - fbBatchSize;
+ if(sEnable4k2kYUVSplit){
+ modifymdpCountfor4k2k(ctx, list);
+ }
+
if(!resourceCheck(ctx, list)) {
ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
return false;
@@ -1098,41 +1123,28 @@
MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
cur_pipe->zOrder = mdpNextZOrder++;
-
- if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
- ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
- layer %d",__FUNCTION__, index);
- return false;
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
+ if(configure4k2kYuv(ctx, layer,
+ mCurrentFrame.mdpToLayer[mdpIndex])
+ != 0 ){
+ ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \
+ for layer %d",__FUNCTION__, index);
+ return false;
+ }
+ else{
+ mdpNextZOrder++;
+ }
+ continue;
}
- }
- }
-
- return true;
-}
-
-bool MDPComp::programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
- if(!allocLayerPipes(ctx, list)) {
- ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
- return false;
- }
- //If we are in this block, it means we have yuv + rgb layers both
- int mdpIdx = 0;
- for (int index = 0; index < mCurrentFrame.layerCount; index++) {
- if(!mCurrentFrame.isFBComposed[index]) {
- hwc_layer_1_t* layer = &list->hwLayers[index];
- int mdpIndex = mCurrentFrame.layerToMDP[index];
- MdpPipeInfo* cur_pipe =
- mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
- cur_pipe->zOrder = mdpIdx++;
-
- if(configure(ctx, layer,
- mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
+ if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
layer %d",__FUNCTION__, index);
return false;
}
}
}
+
return true;
}
@@ -1148,7 +1160,7 @@
return false;
}
- uint32_t size = calcMDPBytesRead(ctx, list);
+ double size = calcMDPBytesRead(ctx, list);
if(!bandwidthCheck(ctx, size)) {
ALOGD_IF(isDebug(), "%s: Exceeds bandwidth",__FUNCTION__);
return false;
@@ -1157,12 +1169,15 @@
return true;
}
-uint32_t MDPComp::calcMDPBytesRead(hwc_context_t *ctx,
+double MDPComp::calcMDPBytesRead(hwc_context_t *ctx,
hwc_display_contents_1_t* list) {
- uint32_t size = 0;
+ double size = 0;
+ const double GIG = 1000000000.0;
- if(!qdutils::MDPVersion::getInstance().is8x74v2())
- return 0;
+ //Skip for targets where no device tree value for bw is supplied
+ if(sMaxBw <= 0.0) {
+ return 0.0;
+ }
for (uint32_t i = 0; i < list->numHwLayers - 1; i++) {
if(!mCurrentFrame.isFBComposed[i]) {
@@ -1172,32 +1187,78 @@
hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
hwc_rect_t dst = layer->displayFrame;
float bpp = ((float)hnd->size) / (hnd->width * hnd->height);
- size += bpp * (crop.right - crop.left) *
- (crop.bottom - crop.top) *
- ctx->dpyAttr[mDpy].yres / (dst.bottom - dst.top);
+ size += (bpp * (crop.right - crop.left) *
+ (crop.bottom - crop.top) *
+ ctx->dpyAttr[mDpy].yres / (dst.bottom - dst.top)) /
+ GIG;
}
}
}
if(mCurrentFrame.fbCount) {
hwc_layer_1_t* layer = &list->hwLayers[list->numHwLayers - 1];
- private_handle_t *hnd = (private_handle_t *)layer->handle;
- if (hnd)
- size += hnd->size;
+ int tempw, temph;
+ size += (getBufferSizeAndDimensions(
+ layer->displayFrame.right - layer->displayFrame.left,
+ layer->displayFrame.bottom - layer->displayFrame.top,
+ HAL_PIXEL_FORMAT_RGBA_8888,
+ tempw, temph)) / GIG;
}
return size;
}
-bool MDPComp::bandwidthCheck(hwc_context_t *ctx, const uint32_t& size) {
- //Will be added for other targets if we run into bandwidth issues and when
- //we have profiling data to set an upper limit.
- if(qdutils::MDPVersion::getInstance().is8x74v2()) {
- const uint32_t ONE_GIG = 1000 * 1000 * 1000;
- double panelRefRate =
- 1000000000.0 / ctx->dpyAttr[mDpy].vsync_period;
- if((size * panelRefRate) > ((sMaxBw - sBwClaimed) * ONE_GIG)) {
- return false;
+bool MDPComp::bandwidthCheck(hwc_context_t *ctx, const double& size) {
+ //Skip for targets where no device tree value for bw is supplied
+ if(sMaxBw <= 0.0) {
+ return true;
+ }
+
+ double panelRefRate =
+ 1000000000.0 / ctx->dpyAttr[mDpy].vsync_period;
+ if((size * panelRefRate) > (sMaxBw - sBwClaimed)) {
+ return false;
+ }
+ return true;
+}
+
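The reworked bandwidth path above sums per-layer MDP reads in gigabytes, scales the total by the panel refresh rate derived from the vsync period, and compares it against whatever remains of the device-tree bandwidth cap; a non-positive cap disables the check. A condensed, self-contained sketch of that accounting, with all constants and the Layer type purely illustrative:

```cpp
#include <cstdio>

struct Layer { double bytesReadPerFrame; };   // simplified stand-in

// Mirrors bandwidthCheck(): true when the frame fits in the remaining budget.
bool fitsBandwidth(double frameGB, double vsyncPeriodNs,
                   double maxBwGBps, double claimedGBps) {
    if (maxBwGBps <= 0.0)               // no device-tree value: skip the check
        return true;
    const double refreshRate = 1e9 / vsyncPeriodNs;
    return (frameGB * refreshRate) <= (maxBwGBps - claimedGBps);
}

int main() {
    const double GIG = 1000000000.0;
    Layer layers[] = {{1920.0 * 1080 * 4}, {1280.0 * 720 * 4}};
    double frameGB = 0.0;
    for (const Layer& l : layers)       // mirrors calcMDPBytesRead()
        frameGB += l.bytesReadPerFrame / GIG;
    std::printf("fits: %d\n",
                fitsBandwidth(frameGB, 16666666.0 /*60Hz*/, 2.3, 0.0));
    return 0;
}
```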
+bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
+ hwc_display_contents_1_t* list) {
+
+ //A-family hw limitation:
+ //If a layer need alpha scaling, MDP can not support.
+ if(ctx->mMDP.version < qdutils::MDSS_V5) {
+ for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
+ if(!mCurrentFrame.isFBComposed[i] &&
+ isAlphaScaled( &list->hwLayers[i])) {
+ ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
+ return false;
+ }
+ }
+ }
+
+ // On 8x26 & 8974 hw, we have a limitation of downscaling+blending.
+    //If multiple layers require downscaling and they also overlap,
+    //fall back to GPU since MDSS cannot handle it.
+ if(qdutils::MDPVersion::getInstance().is8x74v2() ||
+ qdutils::MDPVersion::getInstance().is8x26()) {
+ for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
+ hwc_layer_1_t* botLayer = &list->hwLayers[i];
+ if(!mCurrentFrame.isFBComposed[i] &&
+ isDownscaleRequired(botLayer)) {
+ //if layer-i is marked for MDP and needs downscaling
+ //check if any MDP layer on top of i & overlaps with layer-i
+ for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
+ hwc_layer_1_t* topLayer = &list->hwLayers[j];
+ if(!mCurrentFrame.isFBComposed[j] &&
+ isDownscaleRequired(topLayer)) {
+ hwc_rect_t r = getIntersection(botLayer->displayFrame,
+ topLayer->displayFrame);
+ if(isValidRect(r))
+ return false;
+ }
+ }
+ }
}
}
return true;
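hwLimitationsCheck() above rejects a frame when two MDP-assigned layers both need downscaling and their destination rectangles intersect. A standalone sketch of that overlap test; Rect, intersect() and isValid() are simplified stand-ins for hwc_rect_t, getIntersection() and isValidRect():

```cpp
#include <algorithm>
#include <vector>

struct Rect { int left, top, right, bottom; };

static bool isValid(const Rect& r) {
    return r.right > r.left && r.bottom > r.top;
}

static Rect intersect(const Rect& a, const Rect& b) {
    return { std::max(a.left, b.left), std::max(a.top, b.top),
             std::min(a.right, b.right), std::min(a.bottom, b.bottom) };
}

struct Layer { Rect dst; bool needsDownscale; bool onMdp; };

// True when any pair of MDP layers that both downscale also overlap,
// i.e. the case the diff above falls back to GPU for.
bool overlappingDownscaleFound(const std::vector<Layer>& layers) {
    for (size_t i = 0; i + 1 < layers.size(); ++i) {
        if (!layers[i].onMdp || !layers[i].needsDownscale) continue;
        for (size_t j = i + 1; j < layers.size(); ++j) {
            if (!layers[j].onMdp || !layers[j].needsDownscale) continue;
            if (isValid(intersect(layers[i].dst, layers[j].dst)))
                return true;
        }
    }
    return false;
}
```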
@@ -1206,6 +1267,7 @@
int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
int ret = 0;
const int numLayers = ctx->listStats[mDpy].numAppLayers;
+ MDPVersion& mdpVersion = qdutils::MDPVersion::getInstance();
//reset old data
mCurrentFrame.reset(numLayers);
@@ -1233,11 +1295,31 @@
generateROI(ctx, list);
+ //Convert from kbps to gbps
+ sMaxBw = mdpVersion.getHighBw() / 1000000.0;
+ if (ctx->mExtDisplay->isConnected() || ctx->mMDP.panel != MIPI_CMD_PANEL) {
+ sMaxBw = mdpVersion.getLowBw() / 1000000.0;
+ }
+
//Check whether layers marked for MDP Composition is actually doable.
if(isFullFrameDoable(ctx, list)) {
mCurrentFrame.map();
//Configure framebuffer first if applicable
if(mCurrentFrame.fbZ >= 0) {
+            //If 4k2k YUV layer split is possible and fbZ lies above a
+            //4k2k layer, increment the FB zorder by 1, since splitting
+            //that layer consumes an extra zorder for its right half
+ if(sEnable4k2kYUVSplit){
+ int n4k2kYuvCount = ctx->listStats[mDpy].yuv4k2kCount;
+ for(int index = 0; index < n4k2kYuvCount; index++){
+ int n4k2kYuvIndex =
+ ctx->listStats[mDpy].yuv4k2kIndices[index];
+ if(mCurrentFrame.fbZ > n4k2kYuvIndex){
+ mCurrentFrame.fbZ += 1;
+ }
+ }
+ }
if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list,
mCurrentFrame.fbZ)) {
ALOGE("%s configure framebuffer failed", __func__);
@@ -1268,6 +1350,11 @@
//Try to compose atleast YUV layers through MDP comp and let
//all the RGB layers compose in FB
//Destination over
+
+ if(sEnable4k2kYUVSplit){
+ modifymdpCountfor4k2k(ctx, list);
+ }
+
mCurrentFrame.fbZ = -1;
if(mCurrentFrame.fbCount)
mCurrentFrame.fbZ = mCurrentFrame.mdpCount;
@@ -1284,7 +1371,7 @@
goto exit;
}
}
- if(!programYUV(ctx, list)) {
+ if(!programMDP(ctx, list)) {
reset(numLayers, list);
ctx->mOverlay->clear(mDpy);
ret = -1;
@@ -1311,14 +1398,51 @@
}
exit:
- //gbps (bytes / nanosec = gigabytes / sec)
- sBwClaimed += calcMDPBytesRead(ctx, list) /
- (double)ctx->dpyAttr[mDpy].vsync_period;
+ double panelRefRate =
+ 1000000000.0 / ctx->dpyAttr[mDpy].vsync_period;
+ sBwClaimed += calcMDPBytesRead(ctx, list) * panelRefRate;
return ret;
}
+bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list, int index) {
+
+ bool bRet = true;
+ hwc_layer_1_t* layer = &list->hwLayers[index];
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ int mdpIndex = mCurrentFrame.layerToMDP[index];
+ PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
+ info.pipeInfo = new MdpYUVPipeInfo;
+ info.rot = NULL;
+ MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;
+ ePipeType type = MDPCOMP_OV_VG;
+
+ pipe_info.lIndex = ovutils::OV_INVALID;
+ pipe_info.rIndex = ovutils::OV_INVALID;
+
+ pipe_info.lIndex = getMdpPipe(ctx, type, Overlay::MIXER_DEFAULT);
+ if(pipe_info.lIndex == ovutils::OV_INVALID){
+ bRet = false;
+ ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
+ __FUNCTION__);
+ }
+ pipe_info.rIndex = getMdpPipe(ctx, type, Overlay::MIXER_DEFAULT);
+ if(pipe_info.rIndex == ovutils::OV_INVALID){
+ bRet = false;
+ ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
+ __FUNCTION__);
+ }
+ return bRet;
+}
//=============MDPCompNonSplit===================================================
+void MDPCompNonSplit::modifymdpCountfor4k2k(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list){
+    //Since we split each 4kx2k YUV layer and program it to 2 VG pipes
+    //(if available), increase mdpCount accordingly
+ mCurrentFrame.mdpCount += ctx->listStats[mDpy].yuv4k2kCount;
+}
+
/*
* Configures pipe(s) for MDP composition
*/
@@ -1370,7 +1494,10 @@
hwc_layer_1_t* layer = &list->hwLayers[i];
hwc_rect_t dst = layer->displayFrame;
private_handle_t *hnd = (private_handle_t *)layer->handle;
- if(isYuvBuffer(hnd)) {
+ if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
+ pipesNeeded = pipesNeeded + 2;
+ }
+ else if(isYuvBuffer(hnd)) {
pipesNeeded++;
}
}
@@ -1395,6 +1522,12 @@
hwc_layer_1_t* layer = &list->hwLayers[index];
private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
+ if(allocSplitVGPipesfor4k2k(ctx, list, index)){
+ continue;
+ }
+ }
+
int mdpIndex = mCurrentFrame.layerToMDP[index];
PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
info.pipeInfo = new MdpPipeInfoNonSplit;
@@ -1404,7 +1537,7 @@
if(isYuvBuffer(hnd)) {
type = MDPCOMP_OV_VG;
- } else if(!qhwc::needsScaling(ctx, layer, mDpy)
+ } else if(!qhwc::needsScaling(layer)
&& Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE
&& ctx->mMDP.version >= qdutils::MDSS_V5) {
type = MDPCOMP_OV_DMA;
@@ -1420,6 +1553,20 @@
return true;
}
+int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
+ PipeLayerPair& PipeLayerPair) {
+ MdpYUVPipeInfo& mdp_info =
+ *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
+ eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
+ eIsFg isFg = IS_FG_OFF;
+ eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
+ eDest lDest = mdp_info.lIndex;
+ eDest rDest = mdp_info.rIndex;
+
+ return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
+ lDest, rDest, &PipeLayerPair.rot);
+}
+
bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
if(!isEnabled()) {
@@ -1463,36 +1610,75 @@
int mdpIndex = mCurrentFrame.layerToMDP[i];
- MdpPipeInfoNonSplit& pipe_info =
+ if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
+ {
+ MdpYUVPipeInfo& pipe_info =
+ *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
+ Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
+ ovutils::eDest indexL = pipe_info.lIndex;
+ ovutils::eDest indexR = pipe_info.rIndex;
+ int fd = hnd->fd;
+ uint32_t offset = hnd->offset;
+ if(rot) {
+ rot->queueBuffer(fd, offset);
+ fd = rot->getDstMemId();
+ offset = rot->getDstOffset();
+ }
+ if(indexL != ovutils::OV_INVALID) {
+ ovutils::eDest destL = (ovutils::eDest)indexL;
+ ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
+ using pipe: %d", __FUNCTION__, layer, hnd, indexL );
+ if (!ov.queueBuffer(fd, offset, destL)) {
+ ALOGE("%s: queueBuffer failed for display:%d",
+ __FUNCTION__, mDpy);
+ return false;
+ }
+ }
+
+ if(indexR != ovutils::OV_INVALID) {
+ ovutils::eDest destR = (ovutils::eDest)indexR;
+ ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
+ using pipe: %d", __FUNCTION__, layer, hnd, indexR );
+ if (!ov.queueBuffer(fd, offset, destR)) {
+ ALOGE("%s: queueBuffer failed for display:%d",
+ __FUNCTION__, mDpy);
+ return false;
+ }
+ }
+ }
+ else{
+ MdpPipeInfoNonSplit& pipe_info =
*(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
- ovutils::eDest dest = pipe_info.index;
- if(dest == ovutils::OV_INVALID) {
- ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
- return false;
- }
-
- if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
- continue;
- }
-
- ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
- using pipe: %d", __FUNCTION__, layer,
- hnd, dest );
-
- int fd = hnd->fd;
- uint32_t offset = hnd->offset;
-
- Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
- if(rot) {
- if(!rot->queueBuffer(fd, offset))
+ ovutils::eDest dest = pipe_info.index;
+ if(dest == ovutils::OV_INVALID) {
+ ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
return false;
- fd = rot->getDstMemId();
- offset = rot->getDstOffset();
- }
+ }
- if (!ov.queueBuffer(fd, offset, dest)) {
- ALOGE("%s: queueBuffer failed for display:%d ", __FUNCTION__, mDpy);
- return false;
+ if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
+ continue;
+ }
+
+ ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
+ using pipe: %d", __FUNCTION__, layer,
+ hnd, dest );
+
+ int fd = hnd->fd;
+ uint32_t offset = hnd->offset;
+
+ Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
+ if(rot) {
+ if(!rot->queueBuffer(fd, offset))
+ return false;
+ fd = rot->getDstMemId();
+ offset = rot->getDstOffset();
+ }
+
+ if (!ov.queueBuffer(fd, offset, dest)) {
+ ALOGE("%s: queueBuffer failed for display:%d ",
+ __FUNCTION__, mDpy);
+ return false;
+ }
}
layerProp[i].mFlags &= ~HWC_MDPCOMP;
@@ -1502,6 +1688,23 @@
//=============MDPCompSplit===================================================
+void MDPCompSplit::modifymdpCountfor4k2k(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list){
+    //If a 4kx2k YUV layer lies entirely within either the left half
+    //or the right half, try splitting the layer to avoid decimation
+ int n4k2kYuvCount = ctx->listStats[mDpy].yuv4k2kCount;
+ const int lSplit = getLeftSplit(ctx, mDpy);
+ for(int index = 0; index < n4k2kYuvCount; index++){
+ int n4k2kYuvIndex = ctx->listStats[mDpy].yuv4k2kIndices[index];
+ hwc_layer_1_t* layer = &list->hwLayers[n4k2kYuvIndex];
+ hwc_rect_t dst = layer->displayFrame;
+
+ if((dst.left > lSplit)||(dst.right < lSplit)){
+ mCurrentFrame.mdpCount += 1;
+ }
+ }
+}
+
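MDPCompSplit::modifymdpCountfor4k2k() above only counts an extra pipe for a 4k2k YUV layer whose destination lies entirely in the left or entirely in the right half of the panel, so the split avoids decimation without straddling the mixer boundary. A small sketch of that decision, with illustrative types and helper names:

```cpp
struct Rect { int left, top, right, bottom; };

// Mirrors the (dst.left > lSplit) || (dst.right < lSplit) test above.
bool wholeOnOneSide(const Rect& dst, int lSplit) {
    return dst.left > lSplit || dst.right < lSplit;
}

// One extra MDP entry per 4k2k layer that can be split across two VG pipes.
int extraCountFor4k2k(const Rect* dsts, int count, int lSplit) {
    int extra = 0;
    for (int i = 0; i < count; ++i)
        if (wholeOnOneSide(dsts[i], lSplit))
            ++extra;
    return extra;
}
```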
int MDPCompSplit::pipesNeeded(hwc_context_t *ctx,
hwc_display_contents_1_t* list,
int mixer) {
@@ -1574,6 +1777,12 @@
hwc_layer_1_t* layer = &list->hwLayers[i];
hwc_rect_t dst = layer->displayFrame;
private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
+ if((dst.left > lSplit)||(dst.right < lSplit)){
+ pipesNeeded = pipesNeeded + 2;
+ continue;
+ }
+ }
if(isYuvBuffer(hnd)) {
if(dst.left < lSplit) {
pipesNeeded++;
@@ -1629,6 +1838,15 @@
hwc_layer_1_t* layer = &list->hwLayers[index];
private_handle_t *hnd = (private_handle_t *)layer->handle;
+ hwc_rect_t dst = layer->displayFrame;
+ const int lSplit = getLeftSplit(ctx, mDpy);
+ if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
+ if((dst.left > lSplit)||(dst.right < lSplit)){
+ if(allocSplitVGPipesfor4k2k(ctx, list, index)){
+ continue;
+ }
+ }
+ }
int mdpIndex = mCurrentFrame.layerToMDP[index];
PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
info.pipeInfo = new MdpPipeInfoSplit;
@@ -1653,6 +1871,27 @@
return true;
}
+int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
+ PipeLayerPair& PipeLayerPair) {
+ const int lSplit = getLeftSplit(ctx, mDpy);
+ hwc_rect_t dst = layer->displayFrame;
+ if((dst.left > lSplit)||(dst.right < lSplit)){
+ MdpYUVPipeInfo& mdp_info =
+ *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
+ eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
+ eIsFg isFg = IS_FG_OFF;
+ eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
+ eDest lDest = mdp_info.lIndex;
+ eDest rDest = mdp_info.rIndex;
+
+ return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
+ lDest, rDest, &PipeLayerPair.rot);
+ }
+ else{
+ return configure(ctx, layer, PipeLayerPair);
+ }
+}
+
/*
* Configures pipe(s) for MDP composition
*/
@@ -1715,48 +1954,88 @@
int mdpIndex = mCurrentFrame.layerToMDP[i];
- MdpPipeInfoSplit& pipe_info =
- *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
- Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
+ if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
+ {
+ MdpYUVPipeInfo& pipe_info =
+ *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
+ Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
+ ovutils::eDest indexL = pipe_info.lIndex;
+ ovutils::eDest indexR = pipe_info.rIndex;
+ int fd = hnd->fd;
+ uint32_t offset = hnd->offset;
+ if(rot) {
+ rot->queueBuffer(fd, offset);
+ fd = rot->getDstMemId();
+ offset = rot->getDstOffset();
+ }
+ if(indexL != ovutils::OV_INVALID) {
+ ovutils::eDest destL = (ovutils::eDest)indexL;
+ ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
+ using pipe: %d", __FUNCTION__, layer, hnd, indexL );
+ if (!ov.queueBuffer(fd, offset, destL)) {
+ ALOGE("%s: queueBuffer failed for display:%d",
+ __FUNCTION__, mDpy);
+ return false;
+ }
+ }
- ovutils::eDest indexL = pipe_info.lIndex;
- ovutils::eDest indexR = pipe_info.rIndex;
-
- int fd = hnd->fd;
- int offset = hnd->offset;
-
- if(ctx->mAD->isModeOn()) {
- if(ctx->mAD->draw(ctx, fd, offset)) {
- fd = ctx->mAD->getDstFd(ctx);
- offset = ctx->mAD->getDstOffset(ctx);
+ if(indexR != ovutils::OV_INVALID) {
+ ovutils::eDest destR = (ovutils::eDest)indexR;
+ ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
+ using pipe: %d", __FUNCTION__, layer, hnd, indexR );
+ if (!ov.queueBuffer(fd, offset, destR)) {
+ ALOGE("%s: queueBuffer failed for display:%d",
+ __FUNCTION__, mDpy);
+ return false;
+ }
}
}
+ else{
+ MdpPipeInfoSplit& pipe_info =
+ *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
+ Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
- if(rot) {
- rot->queueBuffer(fd, offset);
- fd = rot->getDstMemId();
- offset = rot->getDstOffset();
- }
+ ovutils::eDest indexL = pipe_info.lIndex;
+ ovutils::eDest indexR = pipe_info.rIndex;
- //************* play left mixer **********
- if(indexL != ovutils::OV_INVALID) {
- ovutils::eDest destL = (ovutils::eDest)indexL;
- ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
- using pipe: %d", __FUNCTION__, layer, hnd, indexL );
- if (!ov.queueBuffer(fd, offset, destL)) {
- ALOGE("%s: queueBuffer failed for left mixer", __FUNCTION__);
- return false;
+ int fd = hnd->fd;
+ int offset = hnd->offset;
+
+ if(ctx->mAD->isModeOn()) {
+ if(ctx->mAD->draw(ctx, fd, offset)) {
+ fd = ctx->mAD->getDstFd(ctx);
+ offset = ctx->mAD->getDstOffset(ctx);
+ }
}
- }
- //************* play right mixer **********
- if(indexR != ovutils::OV_INVALID) {
- ovutils::eDest destR = (ovutils::eDest)indexR;
- ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
- using pipe: %d", __FUNCTION__, layer, hnd, indexR );
- if (!ov.queueBuffer(fd, offset, destR)) {
- ALOGE("%s: queueBuffer failed for right mixer", __FUNCTION__);
- return false;
+ if(rot) {
+ rot->queueBuffer(fd, offset);
+ fd = rot->getDstMemId();
+ offset = rot->getDstOffset();
+ }
+
+ //************* play left mixer **********
+ if(indexL != ovutils::OV_INVALID) {
+ ovutils::eDest destL = (ovutils::eDest)indexL;
+ ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
+ using pipe: %d", __FUNCTION__, layer, hnd, indexL );
+ if (!ov.queueBuffer(fd, offset, destL)) {
+ ALOGE("%s: queueBuffer failed for left mixer",
+ __FUNCTION__);
+ return false;
+ }
+ }
+
+ //************* play right mixer **********
+ if(indexR != ovutils::OV_INVALID) {
+ ovutils::eDest destR = (ovutils::eDest)indexR;
+ ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
+ using pipe: %d", __FUNCTION__, layer, hnd, indexR );
+ if (!ov.queueBuffer(fd, offset, destR)) {
+ ALOGE("%s: queueBuffer failed for right mixer",
+ __FUNCTION__);
+ return false;
+ }
}
}
diff --git a/libhwcomposer/hwc_mdpcomp.h b/libhwcomposer/hwc_mdpcomp.h
index adf74bb..8e9b1be 100644
--- a/libhwcomposer/hwc_mdpcomp.h
+++ b/libhwcomposer/hwc_mdpcomp.h
@@ -70,6 +70,12 @@
virtual ~MdpPipeInfo(){};
};
+ struct MdpYUVPipeInfo : public MdpPipeInfo{
+ ovutils::eDest lIndex;
+ ovutils::eDest rIndex;
+ virtual ~MdpYUVPipeInfo(){};
+ };
+
/* per layer data */
struct PipeLayerPair {
MdpPipeInfo *pipeInfo;
@@ -133,7 +139,12 @@
/* Checks for pipes needed versus pipes available */
virtual bool arePipesAvailable(hwc_context_t *ctx,
hwc_display_contents_1_t* list) = 0;
-
+ /* increments mdpCount if 4k2k yuv layer split is enabled*/
+ virtual void modifymdpCountfor4k2k(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list) = 0;
+ /* configures 4kx2k yuv layer*/
+ virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
+ PipeLayerPair& PipeLayerPair) = 0;
/* set/reset flags for MDPComp */
void setMDPCompLayerFlags(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
@@ -165,11 +176,14 @@
bool secureOnly);
/* checks for conditions where YUV layers cannot be bypassed */
bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
- /* calcs bytes read by MDP for a given frame */
- uint32_t calcMDPBytesRead(hwc_context_t *ctx,
+    /* calculates bytes read by MDP (in GB) for a given frame */
+ double calcMDPBytesRead(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
/* checks if the required bandwidth exceeds a certain max */
- bool bandwidthCheck(hwc_context_t *ctx, const uint32_t& size);
+ bool bandwidthCheck(hwc_context_t *ctx, const double& size);
+    /* checks if MDP/MDSS can process current list w.r.t. HW limitations
+ * All peculiar HW limitations should go here */
+ bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
/* generates ROI based on the modified area of the frame */
void generateROI(hwc_context_t *ctx, hwc_display_contents_1_t* list);
bool validateAndApplyROI(hwc_context_t *ctx, hwc_display_contents_1_t* list,
@@ -198,7 +212,6 @@
void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
bool secureOnly);
bool programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list);
- bool programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list);
void reset(const int& numAppLayers, hwc_display_contents_1_t* list);
bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
bool resourceCheck(hwc_context_t *ctx, hwc_display_contents_1_t *list);
@@ -212,13 +225,17 @@
static bool sIdleFallBack;
static int sMaxPipesPerMixer;
//Max bandwidth. Value is in GBPS. For ex: 2.3 means 2.3GBPS
- static float sMaxBw;
+ static double sMaxBw;
//Tracks composition bandwidth claimed. Represented as the total
//w*h*bpp*fps (gigabytes-per-second) going to MDP mixers.
static double sBwClaimed;
static IdleInvalidator *idleInvalidator;
struct FrameInfo mCurrentFrame;
struct LayerCache mCachedFrame;
+ //Enable 4kx2k yuv layer split
+ static bool sEnable4k2kYUVSplit;
+ bool allocSplitVGPipesfor4k2k(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list, int index);
};
class MDPCompNonSplit : public MDPComp {
@@ -248,6 +265,14 @@
/* Checks for video pipes needed versus pipes available */
virtual bool areVGPipesAvailable(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
+
+ /* increments mdpCount if 4k2k yuv layer split is enabled*/
+ virtual void modifymdpCountfor4k2k(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
+
+ /* configures 4kx2k yuv layer to 2 VG pipes*/
+ virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
+ PipeLayerPair& PipeLayerPair);
};
class MDPCompSplit : public MDPComp {
@@ -281,6 +306,14 @@
virtual bool areVGPipesAvailable(hwc_context_t *ctx,
hwc_display_contents_1_t* list);
+ /* increments mdpCount if 4k2k yuv layer split is enabled*/
+ virtual void modifymdpCountfor4k2k(hwc_context_t *ctx,
+ hwc_display_contents_1_t* list);
+
+ /* configures 4kx2k yuv layer*/
+ virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
+ PipeLayerPair& PipeLayerPair);
+
int pipesNeeded(hwc_context_t *ctx, hwc_display_contents_1_t* list,
int mixer);
};
diff --git a/libhwcomposer/hwc_uevents.cpp b/libhwcomposer/hwc_uevents.cpp
index 5cb87f8..10afc92 100644
--- a/libhwcomposer/hwc_uevents.cpp
+++ b/libhwcomposer/hwc_uevents.cpp
@@ -55,7 +55,7 @@
if (compositionType & (qdutils::COMPOSITION_TYPE_DYN |
qdutils::COMPOSITION_TYPE_MDP |
qdutils::COMPOSITION_TYPE_C2D)) {
- ctx->mCopyBit[dpy] = new CopyBit();
+ ctx->mCopyBit[dpy] = new CopyBit(ctx, dpy);
}
}
diff --git a/libhwcomposer/hwc_utils.cpp b/libhwcomposer/hwc_utils.cpp
index 216e83a..095189f 100644
--- a/libhwcomposer/hwc_utils.cpp
+++ b/libhwcomposer/hwc_utils.cpp
@@ -150,7 +150,8 @@
if (compositionType & (qdutils::COMPOSITION_TYPE_DYN |
qdutils::COMPOSITION_TYPE_MDP |
qdutils::COMPOSITION_TYPE_C2D)) {
- ctx->mCopyBit[HWC_DISPLAY_PRIMARY] = new CopyBit();
+ ctx->mCopyBit[HWC_DISPLAY_PRIMARY] = new CopyBit(ctx,
+ HWC_DISPLAY_PRIMARY);
}
ctx->mExtDisplay = new ExternalDisplay(ctx);
@@ -606,8 +607,21 @@
return extOrientation;
}
-bool needsScaling(hwc_context_t* ctx, hwc_layer_1_t const* layer,
- const int& dpy) {
+bool isDownscaleRequired(hwc_layer_1_t const* layer) {
+ hwc_rect_t displayFrame = layer->displayFrame;
+ hwc_rect_t sourceCrop = integerizeSourceCrop(layer->sourceCropf);
+ int dst_w, dst_h, src_w, src_h;
+ dst_w = displayFrame.right - displayFrame.left;
+ dst_h = displayFrame.bottom - displayFrame.top;
+ src_w = sourceCrop.right - sourceCrop.left;
+ src_h = sourceCrop.bottom - sourceCrop.top;
+
+    if((src_w > dst_w) || (src_h > dst_h))
+ return true;
+
+ return false;
+}
+bool needsScaling(hwc_layer_1_t const* layer) {
int dst_w, dst_h, src_w, src_h;
hwc_rect_t displayFrame = layer->displayFrame;
@@ -676,9 +690,8 @@
return false;
}
-bool isAlphaScaled(hwc_context_t* ctx, hwc_layer_1_t const* layer,
- const int& dpy) {
- if(needsScaling(ctx, layer, dpy) && isAlphaPresent(layer)) {
+bool isAlphaScaled(hwc_layer_1_t const* layer) {
+ if(needsScaling(layer) && isAlphaPresent(layer)) {
return true;
}
return false;
@@ -733,7 +746,6 @@
ctx->listStats[dpy].numAppLayers = list->numHwLayers - 1;
ctx->listStats[dpy].fbLayerIndex = list->numHwLayers - 1;
ctx->listStats[dpy].skipCount = 0;
- ctx->listStats[dpy].needsAlphaScale = false;
ctx->listStats[dpy].preMultipliedAlpha = false;
ctx->listStats[dpy].isSecurePresent = false;
ctx->listStats[dpy].yuvCount = 0;
@@ -743,6 +755,7 @@
ctx->listStats[dpy].roi = ovutils::Dim(0, 0,
(int)ctx->dpyAttr[dpy].xres, (int)ctx->dpyAttr[dpy].yres);
ctx->listStats[dpy].secureUI = false;
+ ctx->listStats[dpy].yuv4k2kCount = 0;
trimList(ctx, list, dpy);
optimizeLayerRects(ctx, list, dpy);
@@ -765,6 +778,7 @@
//reset yuv indices
ctx->listStats[dpy].yuvIndices[i] = -1;
+ ctx->listStats[dpy].yuv4k2kIndices[i] = -1;
if (isSecureBuffer(hnd)) {
ctx->listStats[dpy].isSecurePresent = true;
@@ -779,6 +793,12 @@
ctx->listStats[dpy].yuvIndices[yuvCount] = i;
yuvCount++;
+ if(UNLIKELY(is4kx2kYuvBuffer(hnd))){
+ int& yuv4k2kCount = ctx->listStats[dpy].yuv4k2kCount;
+ ctx->listStats[dpy].yuv4k2kIndices[yuv4k2kCount] = i;
+ yuv4k2kCount++;
+ }
+
if((layer->transform & HWC_TRANSFORM_ROT_90) &&
canUseRotator(ctx, dpy)) {
if( (dpy == HWC_DISPLAY_PRIMARY) &&
@@ -791,9 +811,6 @@
if(layer->blending == HWC_BLENDING_PREMULT)
ctx->listStats[dpy].preMultipliedAlpha = true;
- if(!ctx->listStats[dpy].needsAlphaScale)
- ctx->listStats[dpy].needsAlphaScale =
- isAlphaScaled(ctx, layer, dpy);
if(UNLIKELY(isExtOnly(hnd))){
ctx->listStats[dpy].extOnlyLayerIndex = i;
@@ -1048,7 +1065,7 @@
hwc_rect_t& topframe =
(hwc_rect_t&)list->hwLayers[i].displayFrame;
while(j >= 0) {
- if(!needsScaling(ctx, &list->hwLayers[j], dpy)) {
+ if(!needsScaling(&list->hwLayers[j])) {
hwc_layer_1_t* layer = (hwc_layer_1_t*)&list->hwLayers[j];
hwc_rect_t& bottomframe = layer->displayFrame;
hwc_rect_t& bottomCrop = layer->sourceCrop;
@@ -1056,10 +1073,11 @@
hwc_rect_t irect = getIntersection(bottomframe, topframe);
if(isValidRect(irect)) {
+ hwc_rect_t dest_rect;
//if intersection is valid rect, deduct it
- bottomframe = deductRect(bottomframe, irect);
+ dest_rect = deductRect(bottomframe, irect);
qhwc::calculate_crop_rects(bottomCrop, bottomframe,
- bottomframe, transform);
+ dest_rect, transform);
}
}
@@ -1253,7 +1271,7 @@
ovutils::eMdpFlags &mdpFlags,
int rotDownscale, int transform) {
private_handle_t *hnd = (private_handle_t *)layer->handle;
- MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
+ MetaData_t *metadata = hnd ? (MetaData_t *)hnd->base_metadata : NULL;
if(layer->blending == HWC_BLENDING_PREMULT) {
ovutils::setMdpFlags(mdpFlags,
@@ -1509,6 +1527,7 @@
if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
ALOGE("%s: configRotator failed!", __FUNCTION__);
ctx->mOverlay->clear(dpy);
+ ctx->mLayerRotMap[dpy]->clear();
return -1;
}
ctx->mLayerRotMap[dpy]->add(layer, *rot);
@@ -1526,6 +1545,7 @@
if(configMdp(ctx->mOverlay, parg, orient, crop, dst, metadata, dest) < 0) {
ALOGE("%s: commit failed for low res panel", __FUNCTION__);
+ ctx->mLayerRotMap[dpy]->clear();
return -1;
}
return 0;
@@ -1635,6 +1655,7 @@
if(configRotator(*rot, whf, crop, mdpFlagsL, orient, downscale) < 0) {
ALOGE("%s: configRotator failed!", __FUNCTION__);
ctx->mOverlay->clear(dpy);
+ ctx->mLayerRotMap[dpy]->clear();
return -1;
}
ctx->mLayerRotMap[dpy]->add(layer, *rot);
@@ -1700,6 +1721,7 @@
if(configMdp(ctx->mOverlay, pargL, orient,
tmp_cropL, tmp_dstL, metadata, lDest) < 0) {
ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
+ ctx->mLayerRotMap[dpy]->clear();
return -1;
}
}
@@ -1715,6 +1737,130 @@
if(configMdp(ctx->mOverlay, pargR, orient,
tmp_cropR, tmp_dstR, metadata, rDest) < 0) {
ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
+ ctx->mLayerRotMap[dpy]->clear();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int configureSourceSplit(hwc_context_t *ctx, hwc_layer_1_t *layer,
+ const int& dpy, eMdpFlags& mdpFlagsL, eZorder& z,
+ eIsFg& isFg, const eDest& lDest, const eDest& rDest,
+ Rotator **rot) {
+ private_handle_t *hnd = (private_handle_t *)layer->handle;
+ if(!hnd) {
+ ALOGE("%s: layer handle is NULL", __FUNCTION__);
+ return -1;
+ }
+
+ MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
+
+ int hw_w = ctx->dpyAttr[dpy].xres;
+ int hw_h = ctx->dpyAttr[dpy].yres;
+    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
+ hwc_rect_t dst = layer->displayFrame;
+ int transform = layer->transform;
+ eTransform orient = static_cast<eTransform>(transform);
+ const int downscale = 0;
+ int rotFlags = ROT_FLAGS_NONE;
+    //Splitting a YUV layer on the primary panel needs different zorders
+    //for the two halves, as both are configured to a single mixer
+ eZorder lz = z;
+ eZorder rz = (eZorder)(z + 1);
+
+ Whf whf(getWidth(hnd), getHeight(hnd),
+ getMdpFormat(hnd->format), hnd->size);
+
+ setMdpFlags(layer, mdpFlagsL, 0, transform);
+ trimLayer(ctx, dpy, transform, crop, dst);
+
+ if(isYuvBuffer(hnd) && (transform & HWC_TRANSFORM_ROT_90)) {
+ (*rot) = ctx->mRotMgr->getNext();
+ if((*rot) == NULL) return -1;
+ if(!dpy)
+ BwcPM::setBwc(ctx, crop, dst, transform, mdpFlagsL);
+ //Configure rotator for pre-rotation
+ if(configRotator(*rot, whf, crop, mdpFlagsL, orient, downscale) < 0) {
+ ALOGE("%s: configRotator failed!", __FUNCTION__);
+ ctx->mOverlay->clear(dpy);
+ return -1;
+ }
+ ctx->mLayerRotMap[dpy]->add(layer, *rot);
+ whf.format = (*rot)->getDstFormat();
+ updateSource(orient, whf, crop);
+ rotFlags |= ROT_PREROTATED;
+ }
+
+ eMdpFlags mdpFlagsR = mdpFlagsL;
+ int lSplit = dst.left + (dst.right - dst.left)/2;
+
+ hwc_rect_t tmp_cropL = {0}, tmp_dstL = {0};
+ hwc_rect_t tmp_cropR = {0}, tmp_dstR = {0};
+
+ if(lDest != OV_INVALID) {
+ tmp_cropL = crop;
+ tmp_dstL = dst;
+ hwc_rect_t scissor = {dst.left, dst.top, lSplit, dst.bottom };
+ qhwc::calculate_crop_rects(tmp_cropL, tmp_dstL, scissor, 0);
+ }
+ if(rDest != OV_INVALID) {
+ tmp_cropR = crop;
+ tmp_dstR = dst;
+ hwc_rect_t scissor = {lSplit, dst.top, dst.right, dst.bottom };
+ qhwc::calculate_crop_rects(tmp_cropR, tmp_dstR, scissor, 0);
+ }
+
+ sanitizeSourceCrop(tmp_cropL, tmp_cropR, hnd);
+
+    //When the buffer is H-flipped, the mixer config contents also need swapping
+ //Not needed if the layer is confined to one half of the screen.
+ //If rotator has been used then it has also done the flips, so ignore them.
+ if((orient & OVERLAY_TRANSFORM_FLIP_H) && lDest != OV_INVALID
+ && rDest != OV_INVALID && (*rot) == NULL) {
+ hwc_rect_t new_cropR;
+ new_cropR.left = tmp_cropL.left;
+ new_cropR.right = new_cropR.left + (tmp_cropR.right - tmp_cropR.left);
+
+ hwc_rect_t new_cropL;
+ new_cropL.left = new_cropR.right;
+ new_cropL.right = tmp_cropR.right;
+
+ tmp_cropL.left = new_cropL.left;
+ tmp_cropL.right = new_cropL.right;
+
+ tmp_cropR.left = new_cropR.left;
+ tmp_cropR.right = new_cropR.right;
+
+ }
+
+ //For the mdp, since either we are pre-rotating or MDP does flips
+ orient = OVERLAY_TRANSFORM_0;
+ transform = 0;
+
+ //configure left half
+ if(lDest != OV_INVALID) {
+ PipeArgs pargL(mdpFlagsL, whf, lz, isFg,
+ static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
+ (ovutils::eBlending) getBlending(layer->blending));
+
+ if(configMdp(ctx->mOverlay, pargL, orient,
+ tmp_cropL, tmp_dstL, metadata, lDest) < 0) {
+ ALOGE("%s: commit failed for left half config", __FUNCTION__);
+ return -1;
+ }
+ }
+
+ //configure right half
+ if(rDest != OV_INVALID) {
+ PipeArgs pargR(mdpFlagsR, whf, rz, isFg,
+ static_cast<eRotFlags>(rotFlags),
+ layer->planeAlpha,
+ (ovutils::eBlending) getBlending(layer->blending));
+ if(configMdp(ctx->mOverlay, pargR, orient,
+ tmp_cropR, tmp_dstR, metadata, rDest) < 0) {
+ ALOGE("%s: commit failed for right half config", __FUNCTION__);
return -1;
}
}
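configureSourceSplit() above cuts the destination rectangle at its own midpoint and derives per-half crops before configuring the left and right pipes. A simplified sketch of that split for the non-scaled, non-rotated case; the clamping helper only approximates what calculate_crop_rects() does, and all names are illustrative:

```cpp
#include <algorithm>

struct Rect { int left, top, right, bottom; };

// Clip a 1:1 crop/destination pair to a scissor rectangle (the real helper
// also handles scaling and transforms).
static void clipToScissor(Rect& crop, Rect& dst, const Rect& scissor) {
    const int leftCut  = std::max(0, scissor.left - dst.left);
    const int rightCut = std::max(0, dst.right - scissor.right);
    crop.left  += leftCut;
    crop.right -= rightCut;
    dst.left  = std::max(dst.left, scissor.left);
    dst.right = std::min(dst.right, scissor.right);
}

// Split a layer at the destination midpoint into left/right pipe configs,
// as the diff above does with lSplit.
void splitForTwoPipes(const Rect& crop, const Rect& dst,
                      Rect& cropL, Rect& dstL, Rect& cropR, Rect& dstR) {
    const int lSplit = dst.left + (dst.right - dst.left) / 2;
    cropL = crop; dstL = dst;
    clipToScissor(cropL, dstL, {dst.left, dst.top, lSplit, dst.bottom});
    cropR = crop; dstR = dst;
    clipToScissor(cropR, dstR, {lSplit, dst.top, dst.right, dst.bottom});
}
```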
@@ -1726,9 +1872,9 @@
if(qdutils::MDPVersion::getInstance().is8x26() &&
ctx->mVirtualDisplay->isConnected() &&
!ctx->dpyAttr[HWC_DISPLAY_VIRTUAL].isPause) {
- // Allow if YUV needs rotation and DMA is configured to BLOCK mode for
- // primary. For portrait videos usecase on WFD, Driver supports
- // multiplexing of DMA pipe in LINE and BLOCK mode.
+ /* 8x26 mdss driver supports multiplexing of DMA pipe
+ * in LINE and BLOCK modes for writeback panels.
+ */
if(dpy == HWC_DISPLAY_PRIMARY)
return false;
}
@@ -1816,6 +1962,21 @@
mCount = 0;
}
+void LayerRotMap::clear() {
+ for (uint32_t i = 0; i < mCount; i++) {
+ //mCount represents rotator objects for just this display.
+ //We could have popped mCount topmost objects from mRotMgr, but if each
+ //round has the same failure, typical of stability runs, it would lead
+        //to unnecessary memory allocation and deallocation each time. So we
+        //let the rotator objects stay around, but just drop the fences they
+ //hold. Ultimately the rotator objects will be GCed when not required.
+ //Also resetting fences is required if at least one rotation round has
+ //succeeded before. It'll be a NOP otherwise.
+ mRot[i]->resetReleaseFd();
+ }
+ reset();
+}
+
void LayerRotMap::setReleaseFd(const int& fence) {
for(uint32_t i = 0; i < mCount; i++) {
mRot[i]->setReleaseFd(dup(fence));
diff --git a/libhwcomposer/hwc_utils.h b/libhwcomposer/hwc_utils.h
index 6288057..9751199 100644
--- a/libhwcomposer/hwc_utils.h
+++ b/libhwcomposer/hwc_utils.h
@@ -97,8 +97,9 @@
int yuvCount;
int yuvIndices[MAX_NUM_APP_LAYERS];
int extOnlyLayerIndex;
- bool needsAlphaScale;
bool preMultipliedAlpha;
+ int yuv4k2kIndices[MAX_NUM_APP_LAYERS];
+ int yuv4k2kCount;
// Notifies hwcomposer about the start and end of animation
// This will be set to true during animation, otherwise false.
bool isDisplayAnimating;
@@ -140,7 +141,11 @@
LayerRotMap() { reset(); }
enum { MAX_SESS = 3 };
void add(hwc_layer_1_t* layer, overlay::Rotator *rot);
+ //Resets the mapping of layer to rotator
void reset();
+ //Clears mappings and existing rotator fences
+ //Intended to be used during errors
+ void clear();
uint32_t getCount() const;
hwc_layer_1_t* getLayer(uint32_t index) const;
overlay::Rotator* getRot(uint32_t index) const;
@@ -199,8 +204,9 @@
bool isSecuring(hwc_context_t* ctx, hwc_layer_1_t const* layer);
bool isSecureModePolicy(int mdpVersion);
bool isExternalActive(hwc_context_t* ctx);
-bool needsScaling(hwc_context_t* ctx, hwc_layer_1_t const* layer,
- const int& dpy);
+bool isAlphaScaled(hwc_layer_1_t const* layer);
+bool needsScaling(hwc_layer_1_t const* layer);
+bool isDownscaleRequired(hwc_layer_1_t const* layer);
bool needsScalingWithSplit(hwc_context_t* ctx, hwc_layer_1_t const* layer,
const int& dpy);
void sanitizeSourceCrop(hwc_rect_t& cropL, hwc_rect_t& cropR,
@@ -287,6 +293,13 @@
ovutils::eIsFg& isFg, const ovutils::eDest& lDest,
const ovutils::eDest& rDest, overlay::Rotator **rot);
+//Routine to split and configure high resolution YUV layer (> 2048 width)
+int configureSourceSplit(hwc_context_t *ctx, hwc_layer_1_t *layer,
+ const int& dpy,
+ ovutils::eMdpFlags& mdpFlags, ovutils::eZorder& z,
+ ovutils::eIsFg& isFg, const ovutils::eDest& lDest,
+ const ovutils::eDest& rDest, overlay::Rotator **rot);
+
//On certain targets DMA pipes are used for rotation and they won't be available
//for line operations. On a per-target basis we can restrict certain use cases
//from using rotator, since we know before-hand that such scenarios can lead to
@@ -309,6 +322,12 @@
return (hnd && (hnd->bufferType == BUFFER_TYPE_VIDEO));
}
+// Returns true if the buffer is a 4kx2k (width > 2048) yuv buffer
+static inline bool is4kx2kYuvBuffer(const private_handle_t* hnd) {
+ return (hnd && (hnd->bufferType == BUFFER_TYPE_VIDEO) &&
+ (hnd->width > 2048));
+}
+
// Returns true if the buffer is secure
static inline bool isSecureBuffer(const private_handle_t* hnd) {
return (hnd && (private_handle_t::PRIV_FLAGS_SECURE_BUFFER & hnd->flags));
diff --git a/liboverlay/overlay.cpp b/liboverlay/overlay.cpp
index 432ba81..ad23e84 100644
--- a/liboverlay/overlay.cpp
+++ b/liboverlay/overlay.cpp
@@ -52,6 +52,7 @@
mDumpStr[0] = '\0';
initScalar();
+ setDMAMultiplexingSupported();
}
Overlay::~Overlay() {
@@ -67,7 +68,6 @@
PipeBook::resetUse(i);
PipeBook::resetAllocation(i);
}
- sForceSetBitmap = 0;
mDumpStr[0] = '\0';
#ifdef USES_QSEED_SCALAR
@@ -139,8 +139,10 @@
(mPipeBook[i].mMixer == MIXER_UNUSED || //Free or same mixer
mPipeBook[i].mMixer == mixer) &&
PipeBook::isNotAllocated(i) && //Free pipe
- !(sDMAMode == DMA_BLOCK_MODE && //DMA pipe in Line mode
- PipeBook::getPipeType((eDest)i) == OV_MDP_PIPE_DMA)) {
+ ( (sDMAMultiplexingSupported && dpy) ||
+ !(sDMAMode == DMA_BLOCK_MODE && //DMA pipe in Line mode
+ PipeBook::getPipeType((eDest)i) == OV_MDP_PIPE_DMA)) ){
+ //DMA-Multiplexing is only supported for WB on 8x26
dest = (eDest)i;
PipeBook::setAllocation(i);
break;
@@ -194,19 +196,14 @@
if(mPipeBook[index].mPipe->commit()) {
ret = true;
PipeBook::setUse((int)dest);
- if(sForceSetBitmap & (1 << mPipeBook[index].mDisplay)) {
- mPipeBook[index].mPipe->forceSet();
- }
} else {
int dpy = mPipeBook[index].mDisplay;
- for(int i = 0; i < PipeBook::NUM_PIPES; i++)
+ for(int i = 0; i < PipeBook::NUM_PIPES; i++) {
if (mPipeBook[i].mDisplay == dpy) {
PipeBook::resetAllocation(i);
PipeBook::resetUse(i);
- if(mPipeBook[i].valid()) {
- mPipeBook[i].mPipe->forceSet();
- }
}
+ }
}
return ret;
}
@@ -434,9 +431,6 @@
// Mark as available for this round
PipeBook::resetUse(i);
PipeBook::resetAllocation(i);
- if(mPipeBook[i].valid()) {
- mPipeBook[i].mPipe->forceSet();
- }
}
}
}
@@ -496,7 +490,7 @@
Overlay* Overlay::sInstance = 0;
int Overlay::sDpyFbMap[DPY_MAX] = {0, -1, -1};
int Overlay::sDMAMode = DMA_LINE_MODE;
-int Overlay::sForceSetBitmap = 0;
+bool Overlay::sDMAMultiplexingSupported = false;
int Overlay::PipeBook::NUM_PIPES = 0;
int Overlay::PipeBook::sPipeUsageBitmap = 0;
int Overlay::PipeBook::sLastUsageBitmap = 0;
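The reworked allocation condition in overlay.cpp is easier to follow when pulled out into a predicate: a DMA pipe is normally unusable while DMA is in block mode (reserved for rotation), except that on 8x26 a DMA pipe may still be handed to a non-primary display for writeback. A rough sketch of that intent, assuming dpy == 0 is the primary display and using placeholder enums for the real ones in overlayUtils.h:

// Sketch of the DMA-pipe eligibility rule added to nextPipe().
enum PipeType { OV_MDP_PIPE_RGB, OV_MDP_PIPE_VG, OV_MDP_PIPE_DMA };
enum DMAMode  { DMA_LINE_MODE, DMA_BLOCK_MODE };

bool canUseDmaPipe(PipeType type, DMAMode mode,
                   bool multiplexingSupported, int dpy) {
    // Non-DMA pipes are never restricted by this rule.
    if (type != OV_MDP_PIPE_DMA)
        return true;
    // On 8x26, DMA pipes can be multiplexed for writeback/external
    // displays (dpy != 0) even while block mode is active.
    if (multiplexingSupported && dpy != 0)
        return true;
    // Otherwise a DMA pipe is unusable while DMA is in block mode.
    return mode != DMA_BLOCK_MODE;
}

int main() {
    // A DMA pipe on the external display (dpy = 1) with multiplexing
    // supported is allowed even in block mode.
    return canUseDmaPipe(OV_MDP_PIPE_DMA, DMA_BLOCK_MODE, true, 1) ? 0 : 1;
}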
diff --git a/liboverlay/overlay.h b/liboverlay/overlay.h
index e88c89c..fe855c1 100644
--- a/liboverlay/overlay.h
+++ b/liboverlay/overlay.h
@@ -31,6 +31,7 @@
#define OVERLAY_H
#include "overlayUtils.h"
+#include "mdp_version.h"
#include "utils/threads.h"
struct MetaData_t;
@@ -119,8 +120,6 @@
void getDump(char *buf, size_t len);
/* Reset usage and allocation bits on all pipes for given display */
void clear(int dpy);
- /* Marks the display, whose pipes need to be forcibaly configured */
- void forceSet(const int& dpy);
/* Closes open pipes, called during startup */
static int initOverlay();
@@ -140,6 +139,7 @@
explicit Overlay();
/*Validate index range, abort if invalid */
void validate(int index);
+ static void setDMAMultiplexingSupported();
void dump() const;
/* Creates a scalar object using libscale.so */
static void initScalar();
@@ -209,7 +209,7 @@
static Overlay *sInstance;
static int sDpyFbMap[DPY_MAX];
static int sDMAMode;
- static int sForceSetBitmap;
+ static bool sDMAMultiplexingSupported;
static void *sLibScaleHandle;
static scale::Scale *sScale;
};
@@ -271,6 +271,12 @@
sDMAMode = mode;
}
+inline void Overlay::setDMAMultiplexingSupported() {
+ sDMAMultiplexingSupported = false;
+ if(qdutils::MDPVersion::getInstance().is8x26())
+ sDMAMultiplexingSupported = true;
+}
+
inline int Overlay::getDMAMode() {
return sDMAMode;
}
@@ -280,10 +286,6 @@
return sDpyFbMap[dpy];
}
-inline void Overlay::forceSet(const int& dpy) {
- sForceSetBitmap |= (1 << dpy);
-}
-
inline scale::Scale *Overlay::getScalar() {
return sScale;
}
diff --git a/liboverlay/overlayCtrlData.h b/liboverlay/overlayCtrlData.h
index 18ef5e6..1b26b66 100644
--- a/liboverlay/overlayCtrlData.h
+++ b/liboverlay/overlayCtrlData.h
@@ -87,7 +87,6 @@
void dump() const;
/* Return the dump in the specified buffer */
void getDump(char *buf, size_t len);
- void forceSet();
private:
// mdp ctrl struct(info e.g.)
@@ -232,10 +231,6 @@
mMdp.getDump(buf, len);
}
-inline void Ctrl::forceSet() {
- mMdp.forceSet();
-}
-
inline Data::Data() {
mMdp.reset();
}
diff --git a/liboverlay/overlayMdp.cpp b/liboverlay/overlayMdp.cpp
index fbb173e..006e05d 100644
--- a/liboverlay/overlayMdp.cpp
+++ b/liboverlay/overlayMdp.cpp
@@ -69,7 +69,6 @@
mLkgo.id = MSMFB_NEW_REQUEST;
mOrientation = utils::OVERLAY_TRANSFORM_0;
mDownscale = 0;
- mForceSet = false;
mDpy = 0;
#ifdef USES_POST_PROCESSING
mPPChanged = false;
@@ -211,8 +210,7 @@
doDownscale();
- if(this->ovChanged() || mForceSet) {
- mForceSet = false;
+ if(this->ovChanged()) {
if(!mdp_wrapper::setOverlay(mFd.getFD(), mOVInfo)) {
ALOGE("MdpCtrl failed to setOverlay, restoring last known "
"good ov info");
diff --git a/liboverlay/overlayMdp.h b/liboverlay/overlayMdp.h
index 6dd3976..3cb6a41 100644
--- a/liboverlay/overlayMdp.h
+++ b/liboverlay/overlayMdp.h
@@ -83,7 +83,6 @@
utils::Dim getSrcRectDim() const;
/* setVisualParam */
bool setVisualParams(const MetaData_t& data);
- void forceSet();
private:
/* Perform transformation calculations */
@@ -133,7 +132,6 @@
/* FD for the mdp fbnum */
OvFD mFd;
int mDownscale;
- bool mForceSet;
int mDpy;
#ifdef USES_POST_PROCESSING
@@ -348,10 +346,6 @@
mOVInfo.flags |= MDP_SOURCE_ROTATED_90;
}
-inline void MdpCtrl::forceSet() {
- mForceSet = true;
-}
-
/////// MdpCtrl3D //////
inline MdpCtrl3D::MdpCtrl3D() { reset(); }
diff --git a/liboverlay/overlayRotator.cpp b/liboverlay/overlayRotator.cpp
index 7b3dda1..4b6a8bc 100644
--- a/liboverlay/overlayRotator.cpp
+++ b/liboverlay/overlayRotator.cpp
@@ -107,6 +107,12 @@
mRelFence[mCurrOffset] = fence;
}
+void RotMem::Mem::resetReleaseFd() {
+ //Will wait for previous offline rotation to finish, close fence fd
+ //and reset
+ setReleaseFd(-1);
+}
+
//============RotMgr=========================
RotMgr::RotMgr() {
diff --git a/liboverlay/overlayRotator.h b/liboverlay/overlayRotator.h
index 7c1095f..6bb94a6 100644
--- a/liboverlay/overlayRotator.h
+++ b/liboverlay/overlayRotator.h
@@ -56,6 +56,7 @@
bool close() { return m.close(); }
uint32_t size() const { return m.bufSz(); }
void setReleaseFd(const int& fence);
+ void resetReleaseFd();
// Max rotator buffers
enum { ROT_NUM_BUFS = 2 };
// rotator data info dst offset
@@ -72,6 +73,7 @@
Mem& prev() { return m[(_curr+1) % MAX_ROT_MEM]; }
RotMem& operator++() { ++_curr; return *this; }
void setReleaseFd(const int& fence) { curr().setReleaseFd(fence); }
+ void resetReleaseFd() { curr().resetReleaseFd(); }
bool close();
uint32_t _curr;
Mem m[MAX_ROT_MEM];
@@ -96,6 +98,7 @@
virtual void dump() const = 0;
virtual void getDump(char *buf, size_t len) const = 0;
void setReleaseFd(const int& fence) { mMem.setReleaseFd(fence); }
+ void resetReleaseFd() { mMem.resetReleaseFd(); }
static Rotator *getRotator();
protected:
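resetReleaseFd() routes through the same path as setReleaseFd() with -1, so the fence left over from the previous rotation round is retired before the slot is reused, which is what makes it safe to call on error paths (e.g. from LayerRotMap::clear()). A hedged sketch of what retiring such a slot plausibly involves; sync_wait() is Android's standard fence wait from libsync, but the slot handling here is illustrative, not the actual RotMem layout:

// Illustrative only: how a release-fence slot could be retired.
#include <unistd.h>     // close()
#include <sync/sync.h>  // sync_wait(), from Android's libsync

static void retireFence(int& slot, int newFence) {
    if (slot >= 0) {
        // Wait for the previous offline rotation using this buffer to
        // finish, then drop our reference to the old fence fd.
        sync_wait(slot, 1000 /* ms, illustrative timeout */);
        close(slot);
    }
    // Store the new fence; passing -1 leaves the slot empty, which is
    // the effect of resetReleaseFd().
    slot = newFence;
}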
diff --git a/liboverlay/pipes/overlayGenPipe.cpp b/liboverlay/pipes/overlayGenPipe.cpp
index c03378b..e0b580b 100644
--- a/liboverlay/pipes/overlayGenPipe.cpp
+++ b/liboverlay/pipes/overlayGenPipe.cpp
@@ -167,10 +167,6 @@
return true;
}
-void GenericPipe::forceSet() {
- mCtrlData.ctrl.forceSet();
-}
-
int GenericPipe::getPipeId() {
return mCtrlData.ctrl.getPipeId();
}
diff --git a/liboverlay/pipes/overlayGenPipe.h b/liboverlay/pipes/overlayGenPipe.h
index 5c963bc..ecdd001 100644
--- a/liboverlay/pipes/overlayGenPipe.h
+++ b/liboverlay/pipes/overlayGenPipe.h
@@ -75,10 +75,6 @@
void dump() const;
/* Return the dump in the specified buffer */
void getDump(char *buf, size_t len);
- /* Marks the pipe for forcible setting of params
- * even if they haven't changed
- */
- void forceSet();
int getPipeId();
private:
diff --git a/libqdutils/cb_utils.cpp b/libqdutils/cb_utils.cpp
index bc7e5b1..d8eec2a 100644
--- a/libqdutils/cb_utils.cpp
+++ b/libqdutils/cb_utils.cpp
@@ -49,6 +49,10 @@
Region wormholeRegion(fbFrameRect);
for (uint32_t i = 0 ; i < last; i++) {
+ //TODO Work on using hwc clear instead of gpu for HWC_BLIT
+ //If a layer is marked for HWC_BLIT, the clear is done by the GPU
+ if(list->hwLayers[i].compositionType == HWC_BLIT)
+ return 0;
// need to take care only in per pixel blending.
// Restrict calculation only for copybit layers.
if((list->hwLayers[i].blending != HWC_BLENDING_NONE) ||
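The effect of the early return is that the wormhole (uncovered border) clear is skipped for the whole frame as soon as any layer takes the blit path, on the assumption that the GPU blit clears those regions itself. A stripped-down sketch of that control flow; Layer and BLIT are placeholders for hwc_layer_1_t and HWC_BLIT:

// Simplified control flow of the HWC_BLIT early-out.
#include <vector>

enum CompType { GPU, OVERLAY, BLIT };
struct Layer { CompType compositionType; };

static bool needsWormholeClear(const std::vector<Layer>& layers) {
    for (const Layer& l : layers) {
        // Any blit layer means the GPU is expected to clear the
        // uncovered regions, so skip the wormhole calculation.
        if (l.compositionType == BLIT)
            return false;
    }
    // Otherwise the caller accumulates the uncovered region and clears it.
    return true;
}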
diff --git a/libqdutils/mdp_version.cpp b/libqdutils/mdp_version.cpp
index 0809fd0..b219cd5 100644
--- a/libqdutils/mdp_version.cpp
+++ b/libqdutils/mdp_version.cpp
@@ -48,6 +48,8 @@
mMDPUpscale = 0;
mMDPDownscale = 0;
mPanelType = NO_PANEL;
+ mLowBw = 0;
+ mHighBw = 0;
if(!updatePanelInfo()) {
ALOGE("Unable to read Primary Panel Information");
@@ -168,8 +170,13 @@
else if(!strncmp(tokens[0], "max_upscale_ratio",
strlen("max_upscale_ratio"))) {
mMDPUpscale = atoi(tokens[1]);
- }
- else if(!strncmp(tokens[0], "features", strlen("features"))) {
+ } else if(!strncmp(tokens[0], "max_bandwidth_low",
+ strlen("max_bandwidth_low"))) {
+ mLowBw = atol(tokens[1]);
+ } else if(!strncmp(tokens[0], "max_bandwidth_high",
+ strlen("max_bandwidth_high"))) {
+ mHighBw = atol(tokens[1]);
+ } else if(!strncmp(tokens[0], "features", strlen("features"))) {
for(int i=1; i<index;i++) {
if(!strncmp(tokens[i], "bwc", strlen("bwc"))) {
mFeatures |= MDP_BWC_EN;
@@ -191,6 +198,9 @@
mRGBPipes, mVGPipes);
ALOGD_IF(DEBUG, "%s:mDMAPipes:%d \t mMDPDownscale:%d, mFeatures:%d",
__FUNCTION__, mDMAPipes, mMDPDownscale, mFeatures);
+ ALOGD_IF(DEBUG, "%s:mLowBw: %lu mHighBw: %lu", __FUNCTION__, mLowBw,
+ mHighBw);
+
return true;
}
@@ -234,32 +244,5 @@
return (mFeatures & MDP_BWC_EN);
}
-bool MDPVersion::is8x26() {
- // check for 8x26 variants
- // chip variants have same major number and minor numbers usually vary
- // for e.g., MDSS_MDP_HW_REV_101 is 0x10010000
- // 1001 - major number
- // 0000 - minor number
- // 8x26 v1 minor number is 0000
- // v2 minor number is 0001 etc..
- if( mMdpRev >= MDSS_MDP_HW_REV_101 && mMdpRev < MDSS_MDP_HW_REV_102) {
- return true;
- }
- return false;
-}
-
-bool MDPVersion::is8x74v2() {
- if( mMdpRev >= MDSS_MDP_HW_REV_102 && mMdpRev < MDSS_MDP_HW_REV_200) {
- return true;
- }
- return false;
-}
-
-bool MDPVersion::is8x92() {
- if( mMdpRev >= MDSS_MDP_HW_REV_200 && mMdpRev < MDSS_MDP_HW_REV_206) {
- return true;
- }
- return false;
-}
}; //namespace qdutils
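The two new bandwidth caps are read from the same sysfs caps data as the other MDP properties; the patch only shows the strncmp()/atol() handling of already-split tokens, so the key/value separator below is an assumption made for illustration. A minimal stand-alone sketch of that kind of parsing:

// Stand-alone sketch of pulling the bandwidth caps out of a caps line.
// The '=' separator is assumed; only the token handling mirrors the patch.
#include <cstdio>
#include <cstring>
#include <cstdlib>

struct BwCaps { unsigned long lowBw; unsigned long highBw; };

static void parseCapsLine(const char* line, BwCaps& caps) {
    char buf[128];
    std::strncpy(buf, line, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    char* save = nullptr;
    char* key = strtok_r(buf, "=", &save);
    char* val = strtok_r(nullptr, "=", &save);
    if (!key || !val)
        return;

    if (!std::strncmp(key, "max_bandwidth_low", strlen("max_bandwidth_low")))
        caps.lowBw = std::atol(val);            // kbps
    else if (!std::strncmp(key, "max_bandwidth_high",
                           strlen("max_bandwidth_high")))
        caps.highBw = std::atol(val);           // kbps
}

int main() {
    BwCaps caps = {0, 0};
    parseCapsLine("max_bandwidth_low=1600000", caps);   // hypothetical values
    parseCapsLine("max_bandwidth_high=2300000", caps);
    std::printf("low=%lu kbps high=%lu kbps\n", caps.lowBw, caps.highBw);
    return 0;
}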
diff --git a/libqdutils/mdp_version.h b/libqdutils/mdp_version.h
index 1e1e35e..60a2985 100644
--- a/libqdutils/mdp_version.h
+++ b/libqdutils/mdp_version.h
@@ -57,10 +57,20 @@
MDSS_V5 = 500,
};
+// Chip variants share the same major number; the minor number usually
+// varies. For example, MDSS_MDP_HW_REV_101 is 0x10010000:
+// 1001 - major number
+// 0000 - minor number
+// 8x26 v1 has minor number 0000, v2 has minor number 0001, and so on.
enum mdp_rev {
MDSS_MDP_HW_REV_100 = 0x10000000, //8974 v1
MDSS_MDP_HW_REV_101 = 0x10010000, //8x26
MDSS_MDP_HW_REV_102 = 0x10020000, //8974 v2
+ MDSS_MDP_HW_REV_103 = 0x10030000, //8084
+ MDSS_MDP_HW_REV_104 = 0x10040000, //Next version
+ MDSS_MDP_HW_REV_105 = 0x10050000, //Next version
+ MDSS_MDP_HW_REV_107 = 0x10070000, //Next version
MDSS_MDP_HW_REV_200 = 0x20000000, //8092
MDSS_MDP_HW_REV_206 = 0x20060000, //Future
};
@@ -108,11 +118,28 @@
bool supportsDecimation();
uint32_t getMaxMDPDownscale();
bool supportsBWC();
- bool is8x26();
- bool is8x74v2();
- bool is8x92();
int getLeftSplit() { return mSplit.left(); }
int getRightSplit() { return mSplit.right(); }
+ unsigned long getLowBw() { return mLowBw; }
+ unsigned long getHighBw() { return mHighBw; }
+
+ bool is8x26() {
+ return (mMdpRev >= MDSS_MDP_HW_REV_101 and
+ mMdpRev < MDSS_MDP_HW_REV_102);
+ }
+ bool is8x74v2() {
+ return (mMdpRev >= MDSS_MDP_HW_REV_102 and
+ mMdpRev < MDSS_MDP_HW_REV_103);
+ }
+ bool is8084() {
+ return (mMdpRev >= MDSS_MDP_HW_REV_103 and
+ mMdpRev < MDSS_MDP_HW_REV_104);
+ }
+ bool is8092() {
+ return (mMdpRev >= MDSS_MDP_HW_REV_200 and
+ mMdpRev < MDSS_MDP_HW_REV_206);
+ }
+
private:
bool updateSysFsInfo();
bool updatePanelInfo();
@@ -131,6 +158,8 @@
uint32_t mMDPDownscale;
uint32_t mMDPUpscale;
Split mSplit;
+ unsigned long mLowBw; //kbps
+ unsigned long mHighBw; //kbps
};
}; //namespace qdutils
#endif //INCLUDE_LIBQCOMUTILS_MDPVER
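The inlined helpers turn the revision encoding described in the comment above into half-open range checks on the raw register value. A small worked example using the enum values from this header; per the encoding, 0x10010001 would be 8x26 v2 (major 1001, minor 0001):

// Worked example of the half-open revision range check for is8x26().
#include <cstdint>
#include <cstdio>

enum mdp_rev : uint32_t {
    MDSS_MDP_HW_REV_101 = 0x10010000, // 8x26 v1
    MDSS_MDP_HW_REV_102 = 0x10020000, // 8974 v2
};

static bool is8x26(uint32_t rev) {
    return rev >= MDSS_MDP_HW_REV_101 && rev < MDSS_MDP_HW_REV_102;
}

int main() {
    std::printf("0x10010001 -> %d\n", is8x26(0x10010001)); // 1: 8x26 v2
    std::printf("0x10020000 -> %d\n", is8x26(0x10020000)); // 0: 8974 v2
    return 0;
}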