/* Copyright (c) 2007, 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2007 Google Incorporated
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/file.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/fence.h>

#include "mdss_fb.h"
#include "mdp3_ppp.h"
#include "mdp3_hwio.h"
#include "mdp3.h"
#include "mdss_debug.h"
#include "mdss_sync.h"

#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
#define MDP_RELEASE_BW_TIMEOUT 50

#define MDP_PPP_MAX_BPP 4
#define MDP_PPP_DYNAMIC_FACTOR 3
#define MDP_PPP_MAX_READ_WRITE 3
#define MDP_PPP_MAX_WIDTH 0xFFF
#define ENABLE_SOLID_FILL 0x2
#define DISABLE_SOLID_FILL 0x0
#define BLEND_LATENCY 3
#define CSC_LATENCY 1

#define YUV_BW_FUDGE_NUM 10
#define YUV_BW_FUDGE_DEN 10

struct ppp_resource ppp_res;

static const bool valid_fmt[MDP_IMGTYPE_LIMIT] = {
	[MDP_RGB_565] = true,
	[MDP_BGR_565] = true,
	[MDP_RGB_888] = true,
	[MDP_BGR_888] = true,
	[MDP_BGRA_8888] = true,
	[MDP_RGBA_8888] = true,
	[MDP_ARGB_8888] = true,
	[MDP_XRGB_8888] = true,
	[MDP_RGBX_8888] = true,
	[MDP_Y_CRCB_H2V2] = true,
	[MDP_Y_CBCR_H2V2] = true,
	[MDP_Y_CBCR_H2V2_ADRENO] = true,
	[MDP_Y_CBCR_H2V2_VENUS] = true,
	[MDP_YCRYCB_H2V1] = true,
	[MDP_Y_CBCR_H2V1] = true,
	[MDP_Y_CRCB_H2V1] = true,
	[MDP_BGRX_8888] = true,
};

#define MAX_LIST_WINDOW 16
#define MDP3_PPP_MAX_LIST_REQ 8

struct blit_req_list {
	int count;
	struct mdp_blit_req req_list[MAX_LIST_WINDOW];
	struct mdp3_img_data src_data[MAX_LIST_WINDOW];
	struct mdp3_img_data dst_data[MAX_LIST_WINDOW];
	struct mdss_fence *acq_fen[MDP_MAX_FENCE_FD];
	u32 acq_fen_cnt;
	int cur_rel_fen_fd;
	struct sync_pt *cur_rel_sync_pt;
	struct mdss_fence *cur_rel_fence;
	struct mdss_fence *last_rel_fence;
};

struct blit_req_queue {
	struct blit_req_list req[MDP3_PPP_MAX_LIST_REQ];
	int count;
	int push_idx;
	int pop_idx;
};

struct ppp_status {
	bool wait_for_pop;
	struct completion ppp_comp;
	struct completion pop_q_comp;
	struct mutex req_mutex; /* Protect request queue */
	struct mutex config_ppp_mutex; /* Only one client configures PPP at a time */
	struct msm_fb_data_type *mfd;

	struct kthread_work blit_work;
	struct kthread_worker kworker;
	struct task_struct *blit_thread;
	struct blit_req_queue req_q;

	struct mdss_timeline *timeline;

	int timeline_value;

	struct timer_list free_bw_timer;
	struct work_struct free_bw_work;
	bool bw_update;
	bool bw_on;
	u32 mdp_clk;
};

static struct ppp_status *ppp_stat;
static bool is_blit_optimization_possible(struct blit_req_list *req, int indx);

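/*
 * Scale a 64-bit value by numer/denom, using do_div() so the 64-bit
 * division stays safe on 32-bit kernels. Used below to apply bandwidth
 * and clock margins such as CLK_FUDGE_NUM/CLK_FUDGE_DEN and
 * YUV_BW_FUDGE_NUM/YUV_BW_FUDGE_DEN.
 */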
static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
{
	u64 result = (val * (u64)numer);

	do_div(result, denom);
	return result;
}

int ppp_get_bpp(uint32_t format, uint32_t fb_format)
{
	int bpp = -EINVAL;

	if (format == MDP_FB_FORMAT)
		format = fb_format;

	bpp = ppp_bpp(format);
	if (bpp <= 0)
		pr_err("%s incorrect format %d\n", __func__, format);
	return bpp;
}

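/*
 * Resolve a user-supplied mdp_img into a mapped mdp3_img_data buffer for
 * the PPP client. Formats with an unknown bpp and widths beyond
 * MDP_PPP_MAX_WIDTH are rejected, and 16 lines worth of stride are
 * reserved as padding.
 */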
int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
		struct mdp3_img_data *data)
{
	struct msmfb_data fb_data;
	uint32_t stride;
	int bpp = ppp_bpp(img->format);

	if (bpp <= 0) {
		pr_err("%s incorrect format %d\n", __func__, img->format);
		return -EINVAL;
	}

	if (img->width > MDP_PPP_MAX_WIDTH) {
		pr_err("%s incorrect width %d\n", __func__, img->width);
		return -EINVAL;
	}

	fb_data.flags = img->priv;
	fb_data.memory_id = img->memory_id;
	fb_data.offset = 0;

	stride = img->width * bpp;
	data->padding = 16 * stride;

	return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP);
}

/* Check format */
int mdp3_ppp_verify_fmt(struct mdp_blit_req *req)
{
	if (MDP_IS_IMGTYPE_BAD(req->src.format) ||
	    MDP_IS_IMGTYPE_BAD(req->dst.format)) {
		pr_err("%s: Color format out of range\n", __func__);
		return -EINVAL;
	}

	if (!valid_fmt[req->src.format] ||
	    !valid_fmt[req->dst.format]) {
		pr_err("%s: Color format not supported\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* Check resolution */
int mdp3_ppp_verify_res(struct mdp_blit_req *req)
{
	if ((req->src.width == 0) || (req->src.height == 0) ||
	    (req->src_rect.w == 0) || (req->src_rect.h == 0) ||
	    (req->dst.width == 0) || (req->dst.height == 0) ||
	    (req->dst_rect.w == 0) || (req->dst_rect.h == 0)) {
		pr_err("%s: Height/width can't be 0\n", __func__);
		return -EINVAL;
	}

	if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
	    ((req->src_rect.y + req->src_rect.h) > req->src.height)) {
		pr_err("%s: src roi (x=%d, y=%d, w=%d, h=%d) WxH(%dx%d)\n",
			__func__, req->src_rect.x, req->src_rect.y,
			req->src_rect.w, req->src_rect.h, req->src.width,
			req->src.height);
		pr_err("%s: src roi larger than boundary\n", __func__);
		return -EINVAL;
	}

	if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) ||
	    ((req->dst_rect.y + req->dst_rect.h) > req->dst.height)) {
		pr_err("%s: dst roi (x=%d, y=%d, w=%d, h=%d) WxH(%dx%d)\n",
			__func__, req->dst_rect.x, req->dst_rect.y,
			req->dst_rect.w, req->dst_rect.h, req->dst.width,
			req->dst.height);
		pr_err("%s: dst roi larger than boundary\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* scaling range check */
int mdp3_ppp_verify_scale(struct mdp_blit_req *req)
{
	u32 src_width, src_height, dst_width, dst_height;

	src_width = req->src_rect.w;
	src_height = req->src_rect.h;

	if (req->flags & MDP_ROT_90) {
		dst_width = req->dst_rect.h;
		dst_height = req->dst_rect.w;
	} else {
		dst_width = req->dst_rect.w;
		dst_height = req->dst_rect.h;
	}

	switch (req->dst.format) {
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CBCR_H2V2:
		src_width = (src_width / 2) * 2;
		src_height = (src_height / 2) * 2;
		dst_width = (dst_width / 2) * 2;
		dst_height = (dst_height / 2) * 2;
		break;

	case MDP_Y_CRCB_H2V1:
	case MDP_Y_CBCR_H2V1:
	case MDP_YCRYCB_H2V1:
		src_width = (src_width / 2) * 2;
		dst_width = (dst_width / 2) * 2;
		break;

	default:
		break;
	}

	if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width >
	     MDP_MAX_X_SCALE_FACTOR)
	    || ((MDP_SCALE_Q_FACTOR * dst_width) / src_width <
	     MDP_MIN_X_SCALE_FACTOR)) {
		pr_err("%s: x req scale factor beyond capability\n", __func__);
		return -EINVAL;
	}

	if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height >
	     MDP_MAX_Y_SCALE_FACTOR)
	    || ((MDP_SCALE_Q_FACTOR * dst_height) / src_height <
	     MDP_MIN_Y_SCALE_FACTOR)) {
		pr_err("%s: y req scale factor beyond capability\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* operation check */
int mdp3_ppp_verify_op(struct mdp_blit_req *req)
{
	/*
	 * The MDP_DEINTERLACE and MDP_SHARPENING flags are not valid for
	 * MDP3, so the two are used together to mean MDP_SMART_BLIT.
	 */
	if ((req->flags & MDP_SMART_BLIT) == MDP_SMART_BLIT)
		return 0;
	if (req->flags & MDP_DEINTERLACE) {
		pr_err("%s(): deinterlace not supported\n", __func__);
		return -EINVAL;
	}

	if (req->flags & MDP_SHARPENING) {
		pr_err("%s(): sharpening not supported\n", __func__);
		return -EINVAL;
	}
	return 0;
}

int mdp3_ppp_verify_req(struct mdp_blit_req *req)
{
	int rc;

	if (req == NULL) {
		pr_err("%s: req == null\n", __func__);
		return -EINVAL;
	}

	rc = mdp3_ppp_verify_fmt(req);
	rc |= mdp3_ppp_verify_res(req);
	rc |= mdp3_ppp_verify_scale(req);
	rc |= mdp3_ppp_verify_op(req);

	return rc;
}

int mdp3_ppp_pipe_wait(void)
{
	int ret = 1;

	/*
	 * wait 200 ms for ppp operation to complete before declaring
	 * the MDP hung
	 */
	ret = wait_for_completion_timeout(
		&ppp_stat->ppp_comp, msecs_to_jiffies(200));
	if (!ret) {
		pr_err("%s: Timed out waiting for the MDP.\n",
			__func__);
		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif",
			"dsi0_ctrl", "dsi0_phy");
	}
	return ret;
}

uint32_t mdp3_calc_tpval(struct ppp_img_desc *img, uint32_t old_tp)
{
	uint32_t tpVal;
	uint8_t plane_tp;

	tpVal = 0;
	if ((img->color_fmt == MDP_RGB_565)
	    || (img->color_fmt == MDP_BGR_565)) {
		/* transparent color conversion into 24 bpp */
		plane_tp = (uint8_t) ((old_tp & 0xF800) >> 11);
		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 16;
		plane_tp = (uint8_t) (old_tp & 0x1F);
		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 8;

		plane_tp = (uint8_t) ((old_tp & 0x7E0) >> 5);
		tpVal |= ((plane_tp << 2) | ((plane_tp & 0x30) >> 4));
	} else {
		/* 24bit RGB to RBG conversion */
		tpVal = (old_tp & 0xFF00) >> 8;
		tpVal |= (old_tp & 0xFF) << 8;
		tpVal |= (old_tp & 0xFF0000);
	}

	return tpVal;
}

static void mdp3_ppp_intr_handler(int type, void *arg)
{
	complete(&ppp_stat->ppp_comp);
}

static int mdp3_ppp_callback_setup(void)
{
	int rc;
	struct mdp3_intr_cb ppp_done_cb = {
		.cb = mdp3_ppp_intr_handler,
		.data = NULL,
	};

	rc = mdp3_set_intr_callback(MDP3_PPP_DONE, &ppp_done_cb);
	return rc;
}

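/*
 * Kick one PPP operation: arm the PPP_DONE interrupt, start the engine
 * and block until the interrupt handler completes ppp_comp (or the
 * 200 ms timeout in mdp3_ppp_pipe_wait() declares the MDP hung).
 */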
void mdp3_ppp_kickoff(void)
{
	init_completion(&ppp_stat->ppp_comp);
	mdp3_irq_enable(MDP3_PPP_DONE);
	ppp_enable();
	ATRACE_BEGIN("mdp3_wait_for_ppp_comp");
	mdp3_ppp_pipe_wait();
	ATRACE_END("mdp3_wait_for_ppp_comp");
	mdp3_irq_disable(MDP3_PPP_DONE);
}

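/*
 * Average pixel size per format: bpp_num/bpp_den gives the average bytes
 * per pixel across all planes (e.g. 3/2 for 4:2:0 YUV), while bpp_pln is
 * the bytes per pixel of the fetched plane.
 */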
struct bpp_info {
	int bpp_num;
	int bpp_den;
	int bpp_pln;
};

int mdp3_get_bpp_info(int format, struct bpp_info *bpp)
{
	int rc = 0;

	switch (format) {
	case MDP_RGB_565:
	case MDP_BGR_565:
		bpp->bpp_num = 2;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 2;
		break;
	case MDP_RGB_888:
	case MDP_BGR_888:
		bpp->bpp_num = 3;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 3;
		break;
	case MDP_BGRA_8888:
	case MDP_RGBA_8888:
	case MDP_ARGB_8888:
	case MDP_XRGB_8888:
	case MDP_RGBX_8888:
	case MDP_BGRX_8888:
		bpp->bpp_num = 4;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 4;
		break;
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V2_ADRENO:
	case MDP_Y_CBCR_H2V2_VENUS:
		bpp->bpp_num = 3;
		bpp->bpp_den = 2;
		bpp->bpp_pln = 1;
		break;
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
		bpp->bpp_num = 2;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 1;
		break;
	case MDP_YCRYCB_H2V1:
		bpp->bpp_num = 2;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 2;
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

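/*
 * A request needs the blend stage when it carries a transparency mask, a
 * constant alpha below MDP_ALPHA_NOP, or a source format with per-pixel
 * alpha (ARGB/BGRA/RGBA 8888).
 */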
bool mdp3_is_blend(struct mdp_blit_req *req)
{
	if ((req->transp_mask != MDP_TRANSP_NOP) ||
	    (req->alpha < MDP_ALPHA_NOP) ||
	    (req->src.format == MDP_ARGB_8888) ||
	    (req->src.format == MDP_BGRA_8888) ||
	    (req->src.format == MDP_RGBA_8888))
		return true;
	return false;
}

bool mdp3_is_scale(struct mdp_blit_req *req)
{
	if (req->flags & MDP_ROT_90) {
		if (req->src_rect.w != req->dst_rect.h ||
		    req->src_rect.h != req->dst_rect.w)
			return true;
	} else {
		if (req->src_rect.h != req->dst_rect.h ||
		    req->src_rect.w != req->dst_rect.w)
			return true;
	}
	return false;
}

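/*
 * Estimate the MDP source clock for a request list: every non-smart-blit
 * layer contributes src pixels * fps, scaled up by the worst of the
 * downscale ratio, blend latency and CSC latency (all in percent), plus
 * any pending solid-fill pixels, with a CLK_FUDGE_NUM/CLK_FUDGE_DEN
 * margin before rounding to a supported rate.
 */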
u32 mdp3_clk_calc(struct msm_fb_data_type *mfd,
		struct blit_req_list *lreq, u32 fps)
{
	int i, lcount = 0;
	struct mdp_blit_req *req;
	u64 mdp_clk_rate = 0;
	u32 scale_x = 0, scale_y = 0, scale = 0;
	u32 blend_l, csc_l;

	lcount = lreq->count;

	blend_l = 100 * BLEND_LATENCY;
	csc_l = 100 * CSC_LATENCY;

	for (i = 0; i < lcount; i++) {
		req = &(lreq->req_list[i]);

		if (req->flags & MDP_SMART_BLIT)
			continue;

		if (mdp3_is_scale(req)) {
			if (req->flags & MDP_ROT_90) {
				scale_x = 100 * req->src_rect.h /
					req->dst_rect.w;
				scale_y = 100 * req->src_rect.w /
					req->dst_rect.h;
			} else {
				scale_x = 100 * req->src_rect.w /
					req->dst_rect.w;
				scale_y = 100 * req->src_rect.h /
					req->dst_rect.h;
			}
			scale = max(scale_x, scale_y);
		}
		scale = scale >= 100 ? scale : 100;
		if (mdp3_is_blend(req))
			scale = max(scale, blend_l);

		if (!check_if_rgb(req->src.format))
			scale = max(scale, csc_l);

		mdp_clk_rate += (req->src_rect.w * req->src_rect.h *
			scale / 100) * fps;
	}
	mdp_clk_rate += (ppp_res.solid_fill_pixel * fps);
	mdp_clk_rate = fudge_factor(mdp_clk_rate,
			CLK_FUDGE_NUM, CLK_FUDGE_DEN);
	pr_debug("mdp_clk_rate for ppp = %llu\n", mdp_clk_rate);
	mdp_clk_rate = mdp3_clk_round_off(mdp_clk_rate);

	return mdp_clk_rate;
}

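/*
 * Inflate a bandwidth figure when the blit actually scales (taking 90
 * degree rotation into account): a quarter of the vertical ratio, a
 * quarter of the horizontal ratio and a 1/bpp share of the horizontal
 * ratio are added on top of the unscaled bandwidth.
 */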
u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp)
{
	int src_h, src_w;
	int dst_h, dst_w;

	src_h = req->src_rect.h;
	src_w = req->src_rect.w;

	dst_h = req->dst_rect.h;
	dst_w = req->dst_rect.w;

	if ((!(req->flags & MDP_ROT_90) && src_h == dst_h &&
	     src_w == dst_w) || ((req->flags & MDP_ROT_90) &&
	     src_h == dst_w && src_w == dst_h))
		return bw_req;

	bw_req = (bw_req + (bw_req * dst_h) / (4 * src_h));
	bw_req = (bw_req + (bw_req * dst_w) / (4 * src_w) +
			(bw_req * dst_w) / (bpp * src_w));
	return bw_req;
}

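/*
 * Work out the bus bandwidth (AB/IB) and clock votes for a request list.
 * Solid-fill requests only accumulate pixel/byte counters; for everything
 * else the source read, background read (for blended or smart-blit
 * layers) and destination write bandwidth are summed, multiplied by the
 * effective fps, and stored in ppp_res for the next vote.
 */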
int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd,
		struct blit_req_list *lreq)
{
	struct mdss_panel_info *panel_info = mfd->panel_info;
	int i, lcount = 0;
	int frame_rate = DEFAULT_FRAME_RATE;
	struct mdp_blit_req *req;
	struct bpp_info bpp;
	u64 old_solid_fill_pixel = 0;
	u64 new_solid_fill_pixel = 0;
	u64 src_read_bw = 0;
	u32 bg_read_bw = 0;
	u32 dst_write_bw = 0;
	u64 honest_ppp_ab = 0;
	u32 fps = 0;
	int smart_blit_fg_indx = -1;
	u32 smart_blit_bg_read_bw = 0;

	ATRACE_BEGIN(__func__);
	lcount = lreq->count;
	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
	if (lcount == 0) {
		pr_err("Blit with request count 0, continue to recover!!!\n");
		ATRACE_END(__func__);
		return 0;
	}
	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
		req = &(lreq->req_list[0]);
		mdp3_get_bpp_info(req->dst.format, &bpp);
		old_solid_fill_pixel = ppp_res.solid_fill_pixel;
		new_solid_fill_pixel = req->dst_rect.w * req->dst_rect.h;
		ppp_res.solid_fill_pixel += new_solid_fill_pixel;
		ppp_res.solid_fill_byte += req->dst_rect.w * req->dst_rect.h *
			bpp.bpp_num / bpp.bpp_den;
		if ((old_solid_fill_pixel >= new_solid_fill_pixel) ||
		    (mdp3_res->solid_fill_vote_en)) {
			pr_debug("Last fill pixels are higher or fill_en %d\n",
				mdp3_res->solid_fill_vote_en);
			ATRACE_END(__func__);
			return 0;
		}
	}

	for (i = 0; i < lcount; i++) {
		/* Set Smart blit flag before BW calculation */
		is_blit_optimization_possible(lreq, i);
		req = &(lreq->req_list[i]);

		if (req->fps > 0 && req->fps <= frame_rate) {
			if (fps == 0)
				fps = req->fps;
			else
				fps = frame_rate;
		}

		mdp3_get_bpp_info(req->src.format, &bpp);
		if (lreq->req_list[i].flags & MDP_SMART_BLIT) {
			/*
			 * Track the smart blit FG layer index: if the blit
			 * request at index "n" has the MDP_SMART_BLIT flag
			 * set, it is used as the BG layer in smart blit and
			 * the request at index "n+1" is used as the FG layer.
			 */
			smart_blit_fg_indx = i + 1;
			bg_read_bw = req->src_rect.w * req->src_rect.h *
				bpp.bpp_num / bpp.bpp_den;
			bg_read_bw = mdp3_adjust_scale_factor(req,
					bg_read_bw, bpp.bpp_pln);
			/* Cache read BW of smart blit BG layer */
			smart_blit_bg_read_bw = bg_read_bw;
		} else {
			src_read_bw = req->src_rect.w * req->src_rect.h *
				bpp.bpp_num / bpp.bpp_den;
			src_read_bw = mdp3_adjust_scale_factor(req,
					src_read_bw, bpp.bpp_pln);
			if (!(check_if_rgb(req->src.format))) {
				src_read_bw = fudge_factor(src_read_bw,
						YUV_BW_FUDGE_NUM,
						YUV_BW_FUDGE_DEN);
			}
			mdp3_get_bpp_info(req->dst.format, &bpp);

			if (smart_blit_fg_indx == i) {
				bg_read_bw = smart_blit_bg_read_bw;
				smart_blit_fg_indx = -1;
			} else {
				if ((req->transp_mask != MDP_TRANSP_NOP) ||
				    (req->alpha < MDP_ALPHA_NOP) ||
				    (req->src.format == MDP_ARGB_8888) ||
				    (req->src.format == MDP_BGRA_8888) ||
				    (req->src.format == MDP_RGBA_8888)) {
					bg_read_bw = req->dst_rect.w *
						req->dst_rect.h *
						bpp.bpp_num / bpp.bpp_den;
					bg_read_bw = mdp3_adjust_scale_factor(
							req, bg_read_bw,
							bpp.bpp_pln);
				} else {
					bg_read_bw = 0;
				}
			}
			dst_write_bw = req->dst_rect.w * req->dst_rect.h *
				bpp.bpp_num / bpp.bpp_den;
			honest_ppp_ab += (src_read_bw + bg_read_bw +
					dst_write_bw);
		}
	}

	if (fps == 0)
		fps = frame_rate;

	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
		honest_ppp_ab = ppp_res.solid_fill_byte * 4;
		pr_debug("solid fill honest_ppp_ab %llu\n", honest_ppp_ab);
	} else {
		honest_ppp_ab += ppp_res.solid_fill_byte;
		mdp3_res->solid_fill_vote_en = true;
	}

	honest_ppp_ab = honest_ppp_ab * fps;
	if (honest_ppp_ab != ppp_res.next_ab) {
		ppp_res.next_ab = honest_ppp_ab;
		ppp_res.next_ib = honest_ppp_ab;
		ppp_stat->bw_update = true;
		pr_debug("solid fill ab = %llx, total ab = %llx ",
			(ppp_res.solid_fill_byte * fps), honest_ppp_ab);
		pr_debug("(%d fps) Solid_fill_vote %d\n",
			fps, mdp3_res->solid_fill_vote_en);
		ATRACE_INT("mdp3_ppp_bus_quota", honest_ppp_ab);
	}
	ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq, fps);
	ATRACE_INT("mdp3_ppp_clk_rate", ppp_res.clk_rate);
	ATRACE_END(__func__);
	return 0;
}

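/*
 * Vote or unvote the PPP clock and bus bandwidth. When turning on, the
 * precomputed ppp_res.clk_rate/next_ab/next_ib values are applied; when
 * turning off, everything is voted down to zero.
 */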
int mdp3_ppp_turnon(struct msm_fb_data_type *mfd, int on_off)
{
	uint64_t ab = 0, ib = 0;
	int rate = 0;
	int rc;

	if (on_off) {
		rate = ppp_res.clk_rate;
		ab = ppp_res.next_ab;
		ib = ppp_res.next_ib;
	}
	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, rate, MDP3_CLIENT_PPP);
	rc = mdp3_res_update(on_off, 0, MDP3_CLIENT_PPP);
	if (rc < 0) {
		pr_err("%s: mdp3_clk_enable failed\n", __func__);
		return rc;
	}
	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib);
	if (rc < 0) {
		mdp3_res_update(!on_off, 0, MDP3_CLIENT_PPP);
		pr_err("%s: scale_set_quota failed\n", __func__);
		return rc;
	}
	ppp_stat->bw_on = on_off;
	ppp_stat->mdp_clk = MDP_CORE_CLK_RATE_SVS;
	ppp_stat->bw_update = false;
	return 0;
}

void mdp3_start_ppp(struct ppp_blit_op *blit_op)
{
	/* Wait for the pipe to clear */
	if (MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS) &
	    MDP3_PPP_ACTIVE) {
		pr_err("ppp core is hung up on previous request\n");
		return;
	}
	config_ppp_op_mode(blit_op);
	if (blit_op->solid_fill) {
		MDP3_REG_WRITE(0x10138, 0x10000000);
		MDP3_REG_WRITE(0x1014c, 0xffffffff);
		MDP3_REG_WRITE(0x101b8, 0);
		MDP3_REG_WRITE(0x101bc, 0);
		MDP3_REG_WRITE(0x1013c, 0);
		MDP3_REG_WRITE(0x10140, 0);
		MDP3_REG_WRITE(0x10144, 0);
		MDP3_REG_WRITE(0x10148, 0);
		MDP3_REG_WRITE(MDP3_TFETCH_FILL_COLOR,
			blit_op->solid_fill_color);
		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
			ENABLE_SOLID_FILL);
	} else {
		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
			DISABLE_SOLID_FILL);
	}
	/* Skip PPP kickoff for SMART_BLIT BG layer */
	if (blit_op->mdp_op & MDPOP_SMART_BLIT)
		pr_debug("Skip mdp3_ppp_kickoff\n");
	else
		mdp3_ppp_kickoff();

	if (!(blit_op->solid_fill)) {
		ppp_res.solid_fill_pixel = 0;
		ppp_res.solid_fill_byte = 0;
	}
}

static int solid_fill_workaround(struct mdp_blit_req *req,
		struct ppp_blit_op *blit_op)
{
	/* Make width 2 when there is a solid fill of width 1, and make
	 * sure width does not become zero while trying to avoid odd width
	 */
	if (blit_op->dst.roi.width == 1) {
		if (req->dst_rect.x + 2 > req->dst.width) {
			pr_err("%s: Unable to handle solid fill of width 1",
				__func__);
			return -EINVAL;
		}
		blit_op->dst.roi.width = 2;
	}
	if (blit_op->src.roi.width == 1) {
		if (req->src_rect.x + 2 > req->src.width) {
			pr_err("%s: Unable to handle solid fill of width 1",
				__func__);
			return -EINVAL;
		}
		blit_op->src.roi.width = 2;
	}

	/* Avoid odd width, as it could hang ppp during solid fill */
	blit_op->dst.roi.width = (blit_op->dst.roi.width / 2) * 2;
	blit_op->src.roi.width = (blit_op->src.roi.width / 2) * 2;

	/* Set src format to RGBX, to avoid ppp hang issues */
	blit_op->src.color_fmt = MDP_RGBX_8888;

	/* Avoid RGBA format, as it could hang ppp during solid fill */
	if (blit_op->dst.color_fmt == MDP_RGBA_8888)
		blit_op->dst.color_fmt = MDP_RGBX_8888;
	return 0;
}

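/*
 * Translate an mdp_blit_req plus its mapped source/destination buffers
 * into a ppp_blit_op: ROIs, plane addresses (with the Adreno/Venus NV12
 * plane-1 alignment rules), transparency, constant alpha, rotation,
 * scaling and solid-fill setup.
 */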
static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op,
	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
	struct mdp3_img_data *dst_data)
{
	unsigned long srcp0_start, srcp0_len, dst_start, dst_len;
	uint32_t dst_width, dst_height;
	int ret = 0;

	srcp0_start = (unsigned long) src_data->addr;
	srcp0_len = (unsigned long) src_data->len;
	dst_start = (unsigned long) dst_data->addr;
	dst_len = (unsigned long) dst_data->len;

	blit_op->dst.prop.width = req->dst.width;
	blit_op->dst.prop.height = req->dst.height;

	blit_op->dst.color_fmt = req->dst.format;
	blit_op->dst.p0 = (void *) dst_start;
	blit_op->dst.p0 += req->dst.offset;

	blit_op->dst.roi.x = req->dst_rect.x;
	blit_op->dst.roi.y = req->dst_rect.y;
	blit_op->dst.roi.width = req->dst_rect.w;
	blit_op->dst.roi.height = req->dst_rect.h;

	blit_op->src.roi.x = req->src_rect.x;
	blit_op->src.roi.y = req->src_rect.y;
	blit_op->src.roi.width = req->src_rect.w;
	blit_op->src.roi.height = req->src_rect.h;

	blit_op->src.prop.width = req->src.width;
	blit_op->src.prop.height = req->src.height;
	blit_op->src.color_fmt = req->src.format;

	blit_op->src.p0 = (void *) (srcp0_start + req->src.offset);
	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO)
		blit_op->src.p1 =
			(void *) ((uint32_t) blit_op->src.p0 +
				ALIGN((ALIGN(req->src.width, 32) *
				ALIGN(req->src.height, 32)), 4096));
	else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS)
		blit_op->src.p1 =
			(void *) ((uint32_t) blit_op->src.p0 +
				ALIGN((ALIGN(req->src.width, 128) *
				ALIGN(req->src.height, 32)), 4096));
	else
		blit_op->src.p1 = (void *) ((uint32_t) blit_op->src.p0 +
			req->src.width * req->src.height);

	if (req->flags & MDP_IS_FG)
		blit_op->mdp_op |= MDPOP_LAYER_IS_FG;

	/* blending check */
	if (req->transp_mask != MDP_TRANSP_NOP) {
		blit_op->mdp_op |= MDPOP_TRANSP;
		blit_op->blend.trans_color =
			mdp3_calc_tpval(&blit_op->src, req->transp_mask);
	} else {
		blit_op->blend.trans_color = 0;
	}

	req->alpha &= 0xff;
	if (req->alpha < MDP_ALPHA_NOP) {
		blit_op->mdp_op |= MDPOP_ALPHAB;
		blit_op->blend.const_alpha = req->alpha;
	} else {
		blit_op->blend.const_alpha = 0xff;
	}

	/* rotation check */
	if (req->flags & MDP_FLIP_LR)
		blit_op->mdp_op |= MDPOP_LR;
	if (req->flags & MDP_FLIP_UD)
		blit_op->mdp_op |= MDPOP_UD;
	if (req->flags & MDP_ROT_90)
		blit_op->mdp_op |= MDPOP_ROT90;
	if (req->flags & MDP_DITHER)
		blit_op->mdp_op |= MDPOP_DITHER;

	if (req->flags & MDP_BLEND_FG_PREMULT)
		blit_op->mdp_op |= MDPOP_FG_PM_ALPHA;

	/* scale check */
	if (req->flags & MDP_ROT_90) {
		dst_width = req->dst_rect.h;
		dst_height = req->dst_rect.w;
	} else {
		dst_width = req->dst_rect.w;
		dst_height = req->dst_rect.h;
	}

	if ((blit_op->src.roi.width != dst_width) ||
	    (blit_op->src.roi.height != dst_height))
		blit_op->mdp_op |= MDPOP_ASCALE;

	if (req->flags & MDP_BLUR)
		blit_op->mdp_op |= MDPOP_ASCALE | MDPOP_BLUR;

	if (req->flags & MDP_SOLID_FILL) {
		ret = solid_fill_workaround(req, blit_op);
		if (ret)
			return ret;

		blit_op->solid_fill_color = (req->const_color.g & 0xFF) |
			(req->const_color.r & 0xFF) << 8 |
			(req->const_color.b & 0xFF) << 16 |
			(req->const_color.alpha & 0xFF) << 24;
		blit_op->solid_fill = true;
	} else {
		blit_op->solid_fill = false;
	}

	if (req->flags & MDP_SMART_BLIT)
		blit_op->mdp_op |= MDPOP_SMART_BLIT;

	return ret;
}

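/*
 * Background tile fetching HW workaround for narrow rotated blits with
 * blending: the destination is processed in 16-pixel-high tiles, each
 * kicked off as a separate PPP operation, and any remainder is handled
 * at the end while keeping the scale factors inside the supported range.
 */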
static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
		struct mdp_blit_req *req)
{
	int dst_h, src_w, i;
	uint32_t mdp_op = blit_op->mdp_op;
	void *src_p0 = blit_op->src.p0;
	void *src_p1 = blit_op->src.p1;
	void *dst_p0 = blit_op->dst.p0;

	src_w = req->src_rect.w;
	dst_h = blit_op->dst.roi.height;
	/* bg tile fetching HW workaround */
	for (i = 0; i < (req->dst_rect.h / 16); i++) {
		/* this tile size */
		blit_op->dst.roi.height = 16;
		blit_op->src.roi.width =
			(16 * req->src_rect.w) / req->dst_rect.h;

		/* if it's out of scale range... */
		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
		     blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR)
			blit_op->src.roi.width =
				(MDP_SCALE_Q_FACTOR *
				 blit_op->dst.roi.height) /
				MDP_MAX_X_SCALE_FACTOR;
		else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
			  blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR)
			blit_op->src.roi.width =
				(MDP_SCALE_Q_FACTOR *
				 blit_op->dst.roi.height) /
				MDP_MIN_X_SCALE_FACTOR;

		mdp3_start_ppp(blit_op);

		/* next tile location */
		blit_op->dst.roi.y += 16;
		blit_op->src.roi.x += blit_op->src.roi.width;

		/* this is for a remainder update */
		dst_h -= 16;
		src_w -= blit_op->src.roi.width;
		/* restore parameters that may have been overwritten */
		blit_op->mdp_op = mdp_op;
		blit_op->src.p0 = src_p0;
		blit_op->src.p1 = src_p1;
		blit_op->dst.p0 = dst_p0;
	}

	if ((dst_h < 0) || (src_w < 0))
		pr_err("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
			__LINE__);

	/* remainder update */
	if ((dst_h > 0) && (src_w > 0)) {
		u32 tmp_v;

		blit_op->dst.roi.height = dst_h;
		blit_op->src.roi.width = src_w;

		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
		     blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
			tmp_v = (MDP_SCALE_Q_FACTOR *
				 blit_op->dst.roi.height) /
				MDP_MAX_X_SCALE_FACTOR +
				((MDP_SCALE_Q_FACTOR *
				  blit_op->dst.roi.height) %
				 MDP_MAX_X_SCALE_FACTOR ? 1 : 0);

			/* move x location as roi width gets bigger */
			blit_op->src.roi.x -= tmp_v - blit_op->src.roi.width;
			blit_op->src.roi.width = tmp_v;
		} else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
			    blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
			tmp_v = (MDP_SCALE_Q_FACTOR *
				 blit_op->dst.roi.height) /
				MDP_MIN_X_SCALE_FACTOR +
				((MDP_SCALE_Q_FACTOR *
				  blit_op->dst.roi.height) %
				 MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
			/*
			 * we don't move x location for continuity of
			 * source image
			 */
			blit_op->src.roi.width = tmp_v;
		}

		mdp3_start_ppp(blit_op);
	}
}

static int mdp3_ppp_blit(struct msm_fb_data_type *mfd,
	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
	struct mdp3_img_data *dst_data)
{
	struct ppp_blit_op blit_op;
	int ret = 0;

	memset(&blit_op, 0, sizeof(blit_op));

	if (req->dst.format == MDP_FB_FORMAT)
		req->dst.format = mfd->fb_imgType;
	if (req->src.format == MDP_FB_FORMAT)
		req->src.format = mfd->fb_imgType;

	if (mdp3_ppp_verify_req(req)) {
		pr_err("%s: invalid image!\n", __func__);
		return -EINVAL;
	}

	ret = mdp3_ppp_process_req(&blit_op, req, src_data, dst_data);
	if (ret) {
		pr_err("%s: Failed to process the blit request", __func__);
		return ret;
	}

	if (((blit_op.mdp_op & (MDPOP_TRANSP | MDPOP_ALPHAB)) ||
	     (req->src.format == MDP_ARGB_8888) ||
	     (req->src.format == MDP_BGRA_8888) ||
	     (req->src.format == MDP_RGBA_8888)) &&
	    (blit_op.mdp_op & MDPOP_ROT90) && (req->dst_rect.w <= 16)) {
		mdp3_ppp_tile_workaround(&blit_op, req);
	} else {
		mdp3_start_ppp(&blit_op);
	}

	return 0;
}

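/*
 * MDP width split workaround: 4 bpp destinations whose width modulo 16 is
 * 6 or 14 are split into two blits along the width, with the source rect
 * divided proportionally (swapping axes for 90 degree rotation).
 */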
static int mdp3_ppp_blit_workaround(struct msm_fb_data_type *mfd,
		struct mdp_blit_req *req, unsigned int remainder,
		struct mdp3_img_data *src_data,
		struct mdp3_img_data *dst_data)
{
	int ret;
	struct mdp_blit_req splitreq;
	int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
	int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;

	/* make new request as provide by user */
	splitreq = *req;

	/* break dest roi at width */
	d_y_0 = d_y_1 = req->dst_rect.y;
	d_h_0 = d_h_1 = req->dst_rect.h;
	d_x_0 = req->dst_rect.x;

	if (remainder == 14 || remainder == 6)
		d_w_1 = req->dst_rect.w / 2;
	else
		d_w_1 = (req->dst_rect.w - 1) / 2 - 1;

	d_w_0 = req->dst_rect.w - d_w_1;
	d_x_1 = d_x_0 + d_w_0;
	/* blit first region */
	if (((splitreq.flags & 0x07) == 0x07) ||
	    ((splitreq.flags & 0x07) == 0x05) ||
	    ((splitreq.flags & 0x07) == 0x02) ||
	    ((splitreq.flags & 0x07) == 0x0)) {

		if (splitreq.flags & MDP_ROT_90) {
			s_x_0 = s_x_1 = req->src_rect.x;
			s_w_0 = s_w_1 = req->src_rect.w;
			s_y_0 = req->src_rect.y;
			s_h_1 = (req->src_rect.h * d_w_1) /
				req->dst_rect.w;
			s_h_0 = req->src_rect.h - s_h_1;
			s_y_1 = s_y_0 + s_h_0;
			if (d_w_1 >= 8 * s_h_1) {
				s_h_1++;
				s_y_1--;
			}
		} else {
			s_y_0 = s_y_1 = req->src_rect.y;
			s_h_0 = s_h_1 = req->src_rect.h;
			s_x_0 = req->src_rect.x;
			s_w_1 = (req->src_rect.w * d_w_1) /
				req->dst_rect.w;
			s_w_0 = req->src_rect.w - s_w_1;
			s_x_1 = s_x_0 + s_w_0;
			if (d_w_1 >= 8 * s_w_1) {
				s_w_1++;
				s_x_1--;
			}
		}

		splitreq.src_rect.h = s_h_0;
		splitreq.src_rect.y = s_y_0;
		splitreq.dst_rect.h = d_h_0;
		splitreq.dst_rect.y = d_y_0;
		splitreq.src_rect.x = s_x_0;
		splitreq.src_rect.w = s_w_0;
		splitreq.dst_rect.x = d_x_0;
		splitreq.dst_rect.w = d_w_0;
	} else {
		if (splitreq.flags & MDP_ROT_90) {
			s_x_0 = s_x_1 = req->src_rect.x;
			s_w_0 = s_w_1 = req->src_rect.w;
			s_y_0 = req->src_rect.y;
			s_h_1 = (req->src_rect.h * d_w_0) /
				req->dst_rect.w;
			s_h_0 = req->src_rect.h - s_h_1;
			s_y_1 = s_y_0 + s_h_0;
			if (d_w_0 >= 8 * s_h_1) {
				s_h_1++;
				s_y_1--;
			}
		} else {
			s_y_0 = s_y_1 = req->src_rect.y;
			s_h_0 = s_h_1 = req->src_rect.h;
			s_x_0 = req->src_rect.x;
			s_w_1 = (req->src_rect.w * d_w_0) /
				req->dst_rect.w;
			s_w_0 = req->src_rect.w - s_w_1;
			s_x_1 = s_x_0 + s_w_0;
			if (d_w_0 >= 8 * s_w_1) {
				s_w_1++;
				s_x_1--;
			}
		}
		splitreq.src_rect.h = s_h_0;
		splitreq.src_rect.y = s_y_0;
		splitreq.dst_rect.h = d_h_1;
		splitreq.dst_rect.y = d_y_1;
		splitreq.src_rect.x = s_x_0;
		splitreq.src_rect.w = s_w_0;
		splitreq.dst_rect.x = d_x_1;
		splitreq.dst_rect.w = d_w_1;
	}

	/* No need to split in height */
	ret = mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);

	if (ret)
		return ret;
	/* blit second region */
	if (((splitreq.flags & 0x07) == 0x07) ||
	    ((splitreq.flags & 0x07) == 0x05) ||
	    ((splitreq.flags & 0x07) == 0x02) ||
	    ((splitreq.flags & 0x07) == 0x0)) {
		splitreq.src_rect.h = s_h_1;
		splitreq.src_rect.y = s_y_1;
		splitreq.dst_rect.h = d_h_1;
		splitreq.dst_rect.y = d_y_1;
		splitreq.src_rect.x = s_x_1;
		splitreq.src_rect.w = s_w_1;
		splitreq.dst_rect.x = d_x_1;
		splitreq.dst_rect.w = d_w_1;
	} else {
		splitreq.src_rect.h = s_h_1;
		splitreq.src_rect.y = s_y_1;
		splitreq.dst_rect.h = d_h_0;
		splitreq.dst_rect.y = d_y_0;
		splitreq.src_rect.x = s_x_1;
		splitreq.src_rect.w = s_w_1;
		splitreq.dst_rect.x = d_x_0;
		splitreq.dst_rect.w = d_w_0;
	}

	/* No need to split in height ... just width */
	return mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
}

int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd,
		struct mdp_blit_req *req,
		struct mdp3_img_data *src_data,
		struct mdp3_img_data *dst_data)
{
	int ret;
	unsigned int remainder = 0, is_bpp_4 = 0;

	if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
		pr_err("mdp_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
		return 0;

	/* MDP width split workaround */
	remainder = (req->dst_rect.w) % 16;
	ret = ppp_get_bpp(req->dst.format, mfd->fb_imgType);
	if (ret <= 0) {
		pr_err("mdp_ppp: incorrect bpp!\n");
		return -EINVAL;
	}
	is_bpp_4 = (ret == 4) ? 1 : 0;

	if ((is_bpp_4 && (remainder == 6 || remainder == 14)) &&
	    !(req->flags & MDP_SOLID_FILL))
		ret = mdp3_ppp_blit_workaround(mfd, req, remainder,
			src_data, dst_data);
	else
		ret = mdp3_ppp_blit(mfd, req, src_data, dst_data);
	return ret;
}

void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
{
	int i, ret = 0;

	ATRACE_BEGIN(__func__);
	/* buf sync */
	for (i = 0; i < req->acq_fen_cnt; i++) {
		ret = mdss_wait_sync_fence(req->acq_fen[i],
			WAIT_FENCE_FINAL_TIMEOUT);
		if (ret < 0) {
			pr_err("%s: sync_fence_wait failed! ret = %x\n",
				__func__, ret);
			break;
		}
		mdss_put_sync_fence(req->acq_fen[i]);
	}
	ATRACE_END(__func__);
	if (ret < 0) {
		while (i < req->acq_fen_cnt) {
			mdss_put_sync_fence(req->acq_fen[i]);
			i++;
		}
	}
	req->acq_fen_cnt = 0;
}

void mdp3_ppp_signal_timeline(struct blit_req_list *req)
{
	mdss_inc_timeline(ppp_stat->timeline, 1);
	MDSS_XLOG(ppp_stat->timeline->value, ppp_stat->timeline_value);
	req->last_rel_fence = req->cur_rel_fence;
	req->cur_rel_fence = 0;
}

static void mdp3_ppp_deinit_buf_sync(struct blit_req_list *req)
{
	int i;

	put_unused_fd(req->cur_rel_fen_fd);
	mdss_put_sync_fence(req->cur_rel_fence);
	req->cur_rel_fence = NULL;
	req->cur_rel_fen_fd = 0;
	ppp_stat->timeline_value--;
	for (i = 0; i < req->acq_fen_cnt; i++)
		mdss_put_sync_fence(req->acq_fen[i]);
	req->acq_fen_cnt = 0;
}

static int mdp3_ppp_handle_buf_sync(struct blit_req_list *req,
		struct mdp_buf_sync *buf_sync)
{
	int i, fence_cnt = 0, ret = 0;
	int acq_fen_fd[MDP_MAX_FENCE_FD];
	struct mdss_fence *fence;

	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
	    (ppp_stat->timeline == NULL))
		return -EINVAL;

	if (buf_sync->acq_fen_fd_cnt)
		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
				buf_sync->acq_fen_fd_cnt * sizeof(int));
	if (ret) {
		pr_err("%s: copy_from_user failed\n", __func__);
		return ret;
	}
	for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
		fence = mdss_get_fd_sync_fence(acq_fen_fd[i]);
		if (fence == NULL) {
			pr_info("%s: null fence! i=%d fd=%d\n", __func__, i,
				acq_fen_fd[i]);
			ret = -EINVAL;
			break;
		}
		req->acq_fen[i] = fence;
	}
	fence_cnt = i;
	if (ret)
		goto buf_sync_err_1;
	req->acq_fen_cnt = fence_cnt;
	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
		mdp3_ppp_wait_for_fence(req);

	MDSS_XLOG(ppp_stat->timeline_value);

	/* create fence */
	req->cur_rel_fence = mdss_get_sync_fence(ppp_stat->timeline,
		"ppp_fence", NULL, ppp_stat->timeline_value++);
	if (req->cur_rel_fence == NULL) {
		req->cur_rel_sync_pt = NULL;
		pr_err("%s: cannot create fence\n", __func__);
		ret = -ENOMEM;
		goto buf_sync_err_2;
	}
	/* create fd */
	return ret;
buf_sync_err_2:
	ppp_stat->timeline_value--;
buf_sync_err_1:
	for (i = 0; i < fence_cnt; i++)
		mdss_put_sync_fence(req->acq_fen[i]);
	req->acq_fen_cnt = 0;
	return ret;
}

void mdp3_ppp_req_push(struct blit_req_queue *req_q, struct blit_req_list *req)
{
	int idx = req_q->push_idx;

	req_q->req[idx] = *req;
	req_q->count++;
	req_q->push_idx = (req_q->push_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
}

struct blit_req_list *mdp3_ppp_next_req(struct blit_req_queue *req_q)
{
	struct blit_req_list *req;

	if (req_q->count == 0)
		return NULL;
	req = &req_q->req[req_q->pop_idx];
	return req;
}

void mdp3_ppp_req_pop(struct blit_req_queue *req_q)
{
	req_q->count--;
	req_q->pop_idx = (req_q->pop_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
}

void mdp3_free_fw_timer_func(unsigned long arg)
{
	mdp3_res->solid_fill_vote_en = false;
	schedule_work(&ppp_stat->free_bw_work);
}

static void mdp3_free_bw_wq_handler(struct work_struct *work)
{
	struct msm_fb_data_type *mfd = ppp_stat->mfd;

	mutex_lock(&ppp_stat->config_ppp_mutex);
	if (ppp_stat->bw_on)
		mdp3_ppp_turnon(mfd, 0);
	mutex_unlock(&ppp_stat->config_ppp_mutex);
}

static bool is_hw_workaround_needed(struct mdp_blit_req req)
{
	bool result = false;
	bool is_bpp_4 = false;
	uint32_t remainder = 0;
	uint32_t bpp = ppp_get_bpp(req.dst.format, ppp_stat->mfd->fb_imgType);

	/* MDP width split workaround */
	remainder = (req.dst_rect.w) % 16;
	is_bpp_4 = (bpp == 4) ? 1 : 0;
	if ((is_bpp_4 && (remainder == 6 || remainder == 14)) &&
	    !(req.flags & MDP_SOLID_FILL))
		result = true;

	/* bg tile fetching HW workaround */
	if (((req.alpha < MDP_ALPHA_NOP) ||
	     (req.transp_mask != MDP_TRANSP_NOP) ||
	     (req.src.format == MDP_ARGB_8888) ||
	     (req.src.format == MDP_BGRA_8888) ||
	     (req.src.format == MDP_RGBA_8888)) &&
	    (req.flags & MDP_ROT_90) && (req.dst_rect.w <= 16))
		result = true;

	return result;
}

static bool is_roi_equal(struct mdp_blit_req req0,
		struct mdp_blit_req req1)
{
	bool result = false;
	struct mdss_panel_info *panel_info = ppp_stat->mfd->panel_info;

	/*
	 * Check req0 and req1 layer destination ROI and return true if
	 * they are equal.
	 */
	if ((req0.dst_rect.x == req1.dst_rect.x) &&
	    (req0.dst_rect.y == req1.dst_rect.y) &&
	    (req0.dst_rect.w == req1.dst_rect.w) &&
	    (req0.dst_rect.h == req1.dst_rect.h))
		result = true;
	/*
	 * Also treat the ROIs as equal when both layers are source cropped
	 * and the cropped layer width and height match the panel width and
	 * height.
	 */
	else if ((req0.dst_rect.w == req1.dst_rect.w) &&
		 (req0.dst_rect.h == req1.dst_rect.h) &&
		 (req0.dst_rect.w == panel_info->xres) &&
		 (req0.dst_rect.h == panel_info->yres))
		result = true;

	return result;
}

static bool is_scaling_needed(struct mdp_blit_req req)
{
	bool result = true;

	/* Return true if the layer needs scaling, else return false */
	if ((req.src_rect.w == req.dst_rect.w) &&
	    (req.src_rect.h == req.dst_rect.h))
		result = false;
	return result;
}

static bool is_blit_optimization_possible(struct blit_req_list *req, int indx)
{
	int next = indx + 1;
	bool status = false;
	struct mdp3_img_data tmp_data;
	bool dst_roi_equal = false;
	bool hw_workaround_active = false;
	struct mdp_blit_req bg_req;
	struct mdp_blit_req fg_req;

	if (!(mdp3_res->smart_blit_en)) {
		pr_debug("Smart BLIT disabled from sysfs\n");
		return status;
	}
	if (next < req->count) {
		bg_req = req->req_list[indx];
		fg_req = req->req_list[next];
		hw_workaround_active = is_hw_workaround_needed(bg_req);
		dst_roi_equal = is_roi_equal(bg_req, fg_req);
		/*
		 * Check the userspace smart BLIT flag for the current and
		 * next request: if the blit request at index "n" has
		 * MDP_SMART_BLIT set, it is used as the BG layer in smart
		 * blit and the request at index "n+1" is used as the FG
		 * layer.
		 */
		if ((bg_req.flags & MDP_SMART_BLIT) &&
		    (!(fg_req.flags & MDP_SMART_BLIT)) &&
		    (!(hw_workaround_active)))
			status = true;
		/*
		 * Enable SMART blit between request 0(BG) & request 1(FG)
		 * when the destination ROIs of the BG and FG layer are the
		 * same, there is no scaling and no rotation on the BG layer,
		 * and the BG layer color format is RGB and marked MDP_IS_FG.
		 */
		else if ((mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) &&
			 (indx == 0) && (dst_roi_equal) &&
			 (bg_req.flags & MDP_IS_FG) &&
			 (!(is_scaling_needed(bg_req))) &&
			 (!(bg_req.flags & (MDP_ROT_90))) &&
			 (check_if_rgb(bg_req.src.format)) &&
			 (!(hw_workaround_active))) {
			status = true;
			req->req_list[indx].flags |= MDP_SMART_BLIT;
			pr_debug("Optimize RGB Blit for Req Indx %d\n", indx);
		}
		/*
		 * Swap BG and FG layer to enable SMART blit between request
		 * 0(BG) & request 1(FG) when the destination ROIs of the BG
		 * and FG layer are the same, there is no scaling on the FG
		 * and BG layer, no rotation on the FG layer, and the BG
		 * layer color format is YUV.
		 */
		else if ((indx == 0) &&
			 (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) &&
			 (!(fg_req.flags & (MDP_ROT_90))) && (dst_roi_equal) &&
			 (!(check_if_rgb(bg_req.src.format))) &&
			 (!(hw_workaround_active))) {
			/*
			 * Disable SMART blit for the BG(YUV) layer when it
			 * is scaled, rotated and UD flipped.
			 */
			if ((is_scaling_needed(bg_req)) &&
			    (bg_req.flags & MDP_ROT_90) &&
			    (bg_req.flags & MDP_FLIP_UD)) {
				pr_debug("YUV layer with ROT+UD_FLIP+Scaling Not supported\n");
				return false;
			}
			/*
			 * Swap blit requests at index 0 and 1. The YUV layer
			 * at index 0 is replaced with the UI layer request
			 * present at index 1. Since the UI layer will be in
			 * the background, set its IS_FG flag and clear it
			 * from the YUV layer flags.
			 */
			if (!(is_scaling_needed(req->req_list[next]))) {
				if (bg_req.flags & MDP_IS_FG) {
					req->req_list[indx].flags &=
						~MDP_IS_FG;
					req->req_list[next].flags |= MDP_IS_FG;
				}
				bg_req = req->req_list[next];
				req->req_list[next] = req->req_list[indx];
				req->req_list[indx] = bg_req;

				tmp_data = req->src_data[next];
				req->src_data[next] = req->src_data[indx];
				req->src_data[indx] = tmp_data;

				tmp_data = req->dst_data[next];
				req->dst_data[next] = req->dst_data[indx];
				req->dst_data[indx] = tmp_data;
				status = true;
				req->req_list[indx].flags |= MDP_SMART_BLIT;
				pr_debug("Optimize YUV Blit for Req Indx %d\n",
					indx);
			}
		}
	}
	return status;
}

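/*
 * Worker for the PPP kthread: drains the request queue, waiting on
 * acquire fences, updating bandwidth/clock votes and running each blit,
 * then signals the release timeline, pops the request and re-arms the
 * bandwidth release timer.
 */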
| 1513 | static void mdp3_ppp_blit_handler(struct kthread_work *work) |
| 1514 | { |
| 1515 | struct msm_fb_data_type *mfd = ppp_stat->mfd; |
| 1516 | struct blit_req_list *req; |
| 1517 | int i, rc = 0; |
| 1518 | bool smart_blit = false; |
| 1519 | int smart_blit_fg_index = -1; |
| 1520 | |
| 1521 | mutex_lock(&ppp_stat->config_ppp_mutex); |
| 1522 | req = mdp3_ppp_next_req(&ppp_stat->req_q); |
| 1523 | if (!req) { |
| 1524 | mutex_unlock(&ppp_stat->config_ppp_mutex); |
| 1525 | return; |
| 1526 | } |
| 1527 | |
| 1528 | if (!ppp_stat->bw_on) { |
| 1529 | rc = mdp3_ppp_turnon(mfd, 1);
| 1530 | if (rc < 0) { |
| 1531 | mutex_unlock(&ppp_stat->config_ppp_mutex); |
| 1532 | pr_err("%s: Enable ppp resources failed\n", __func__); |
| 1533 | return; |
| 1534 | } |
| 1535 | } |
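| | /* Drain the queue; requests pushed while we run are picked up too. */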
| 1536 | while (req) { |
| 1537 | mdp3_ppp_wait_for_fence(req); |
| 1538 | mdp3_calc_ppp_res(mfd, req); |
| 1539 | if (ppp_res.clk_rate != ppp_stat->mdp_clk) { |
| 1540 | ppp_stat->mdp_clk = ppp_res.clk_rate; |
| 1541 | mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, |
| 1542 | ppp_stat->mdp_clk, MDP3_CLIENT_PPP); |
| 1543 | } |
| 1544 | if (ppp_stat->bw_update) { |
| 1545 | rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, |
| 1546 | ppp_res.next_ab, ppp_res.next_ib); |
| 1547 | if (rc < 0) {
| 1548 | pr_err("%s: bw set quota failed\n", __func__);
| | mutex_unlock(&ppp_stat->config_ppp_mutex);
| 1549 | return;
| 1550 | }
| 1551 | ppp_stat->bw_update = false; |
| 1552 | } |
| 1553 | ATRACE_BEGIN("mdp3_ppp_start");
| 1554 | for (i = 0; i < req->count; i++) { |
| 1555 | smart_blit = is_blit_optimization_possible(req, i); |
| 1556 | if (smart_blit) |
| 1557 | /*
| 1558 | * Remember the blit request index of the
| 1559 | * FG layer of this smart-blit pair.
| 1560 | */
| 1561 | smart_blit_fg_index = i + 1; |
| 1562 | if (!(req->req_list[i].flags & MDP_NO_BLIT)) { |
| 1563 | /* Do the actual blit. */ |
| 1564 | if (!rc) { |
| 1565 | rc = mdp3_ppp_start_blit(mfd, |
| 1566 | &(req->req_list[i]), |
| 1567 | &req->src_data[i], |
| 1568 | &req->dst_data[i]); |
| 1569 | } |
| 1570 | /* Unmap blit source buffer */ |
| 1571 | if (!smart_blit) {
| 1572 | mdp3_put_img(&req->src_data[i], |
| 1573 | MDP3_CLIENT_PPP); |
| 1574 | } |
| 1575 | if (smart_blit_fg_index == i) { |
| 1576 | /* Unmap smart blit BG buffer */ |
| 1577 | mdp3_put_img(&req->src_data[i - 1], |
| 1578 | MDP3_CLIENT_PPP); |
| 1579 | smart_blit_fg_index = -1; |
| 1580 | } |
| 1581 | mdp3_put_img(&req->dst_data[i], |
| 1582 | MDP3_CLIENT_PPP); |
| 1583 | smart_blit = false; |
| 1584 | } |
| 1585 | } |
| 1586 | ATRACE_END("mdp3_ppp_start"); |
| 1587 | /* Signal the release-fence timeline */
| 1588 | mutex_lock(&ppp_stat->req_mutex); |
| 1589 | mdp3_ppp_signal_timeline(req); |
| 1590 | mdp3_ppp_req_pop(&ppp_stat->req_q); |
| 1591 | req = mdp3_ppp_next_req(&ppp_stat->req_q); |
| 1592 | if (ppp_stat->wait_for_pop) |
| 1593 | complete(&ppp_stat->pop_q_comp); |
| 1594 | mutex_unlock(&ppp_stat->req_mutex); |
| 1595 | } |
| 1596 | mod_timer(&ppp_stat->free_bw_timer, jiffies + |
| 1597 | msecs_to_jiffies(MDP_RELEASE_BW_TIMEOUT)); |
| 1598 | mutex_unlock(&ppp_stat->config_ppp_mutex); |
| 1599 | } |
| 1600 | |
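| | /*
| | * mdp3_ppp_parse_req() - validate and queue a user blit request list.
| | *
| | * Runs in the caller's (client) context. It throttles the caller while
| | * the request queue is full, copies the mdp_blit_req array from user
| | * space, sets up the acquire/release fences, and pins the buffers for
| | * every source and destination image before pushing the request and
| | * queuing blit_work on the PPP kthread worker. When called with async
| | * set, the release-fence fd is copied back to user space; otherwise
| | * this function itself waits (up to 5 seconds) on the release fence
| | * before returning.
| | */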
| 1601 | int mdp3_ppp_parse_req(void __user *p, |
| 1602 | struct mdp_async_blit_req_list *req_list_header, |
| 1603 | int async) |
| 1604 | { |
| 1605 | struct blit_req_list *req; |
| 1606 | struct blit_req_queue *req_q = &ppp_stat->req_q; |
Arun kumar | 47145e0 | 2018-03-23 22:07:51 +0530 | [diff] [blame] | 1607 | struct mdss_fence *fence = NULL; |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1608 | int count, rc, idx, i; |
| 1609 | |
| 1610 | count = req_list_header->count;
| | if (count <= 0 || count > MAX_LIST_WINDOW)
| | return -EINVAL;
| 1611 | 
| 1612 | mutex_lock(&ppp_stat->req_mutex); |
| 1613 | while (req_q->count >= MDP3_PPP_MAX_LIST_REQ) { |
| 1614 | ppp_stat->wait_for_pop = true; |
| 1615 | mutex_unlock(&ppp_stat->req_mutex); |
| 1616 | rc = wait_for_completion_timeout( |
| 1617 | &ppp_stat->pop_q_comp, 5 * HZ); |
| 1618 | if (rc == 0) { |
| 1619 | /* This should only occur if there is a serious problem */
| 1620 | pr_err("%s: timed out waiting to queue request\n",
| 1621 | __func__); |
| 1622 | return -EBUSY; |
| 1623 | } |
| 1624 | mutex_lock(&ppp_stat->req_mutex); |
| 1625 | ppp_stat->wait_for_pop = false; |
| 1626 | } |
| 1627 | idx = req_q->push_idx; |
| 1628 | req = &req_q->req[idx]; |
| 1629 | |
| 1630 | if (copy_from_user(&req->req_list, p, |
| 1631 | sizeof(struct mdp_blit_req) * count)) { |
| 1632 | mutex_unlock(&ppp_stat->req_mutex); |
| 1633 | return -EFAULT; |
| 1634 | } |
| 1635 | |
| 1636 | rc = mdp3_ppp_handle_buf_sync(req, &req_list_header->sync); |
| 1637 | if (rc < 0) { |
| 1638 | pr_err("%s: Failed to create sync point\n", __func__);
| 1639 | mutex_unlock(&ppp_stat->req_mutex); |
| 1640 | return rc; |
| 1641 | } |
| 1642 | req->count = count; |
| 1643 | |
| 1644 | /* We need to grab the ion handles while running in the client thread */
| 1645 | for (i = 0; i < count; i++) { |
| 1646 | rc = mdp3_ppp_get_img(&req->req_list[i].src, |
| 1647 | &req->req_list[i], &req->src_data[i]); |
| 1648 | if (rc < 0 || req->src_data[i].len == 0) { |
| 1649 | pr_err("mdp_ppp: couldn't retrieve src img from mem\n"); |
| 1650 | goto parse_err_1; |
| 1651 | } |
| 1652 | |
| 1653 | rc = mdp3_ppp_get_img(&req->req_list[i].dst, |
| 1654 | &req->req_list[i], &req->dst_data[i]); |
| 1655 | if (rc < 0 || req->dst_data[i].len == 0) { |
| 1656 | mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP); |
| 1657 | pr_err("mdp_ppp: couldn't retrieve dest img from mem\n"); |
| 1658 | goto parse_err_1; |
| 1659 | } |
| 1660 | } |
| 1661 | |
| 1662 | if (async) { |
Arun kumar | 47145e0 | 2018-03-23 22:07:51 +0530 | [diff] [blame] | 1663 | req->cur_rel_fen_fd = mdss_get_sync_fence_fd( |
| 1664 | req->cur_rel_fence); |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1665 | rc = copy_to_user(req_list_header->sync.rel_fen_fd, |
| 1666 | &req->cur_rel_fen_fd, sizeof(int)); |
| 1667 | if (rc) { |
| 1668 | pr_err("%s:copy_to_user failed\n", __func__); |
| 1669 | goto parse_err_2; |
| 1670 | } |
| 1671 | } else { |
| 1672 | fence = req->cur_rel_fence; |
Krishna Manikandan | 7ef5b5c | 2018-04-27 17:09:41 +0530 | [diff] [blame] | 1673 | fence_get((struct fence *) fence); |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1674 | } |
| 1675 | |
| 1676 | mdp3_ppp_req_push(req_q, req); |
| 1677 | mutex_unlock(&ppp_stat->req_mutex); |
Sachin Bhayare | 3d3767e | 2018-01-02 21:10:57 +0530 | [diff] [blame] | 1678 | kthread_queue_work(&ppp_stat->kworker, &ppp_stat->blit_work); |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1679 | if (!async) { |
| 1680 | /* wait for release fence */ |
Arun kumar | 47145e0 | 2018-03-23 22:07:51 +0530 | [diff] [blame] | 1681 | rc = mdss_wait_sync_fence(fence, |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1682 | 5 * MSEC_PER_SEC); |
| 1683 | if (rc < 0) |
| 1684 | pr_err("%s: sync blit! rc = %x\n", __func__, rc); |
| 1685 | |
Arun kumar | 47145e0 | 2018-03-23 22:07:51 +0530 | [diff] [blame] | 1686 | mdss_put_sync_fence(fence); |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1687 | fence = NULL; |
| 1688 | } |
| 1689 | return 0; |
| 1690 | |
| 1691 | parse_err_2: |
| 1692 | put_unused_fd(req->cur_rel_fen_fd); |
| 1693 | parse_err_1: |
| 1694 | for (i--; i >= 0; i--) { |
| 1695 | mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP); |
| 1696 | mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP); |
| 1697 | } |
| 1698 | mdp3_ppp_deinit_buf_sync(req); |
| 1699 | mutex_unlock(&ppp_stat->req_mutex); |
| 1700 | return rc; |
| 1701 | } |
| 1702 | |
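| | /*
| | * mdp3_ppp_res_init() - one-time setup of the PPP blit resources.
| | *
| | * Allocates ppp_stat, creates the "mdp3_ppp" sync timeline used for
| | * release fences, starts the dedicated SCHED_FIFO kthread worker that
| | * runs mdp3_ppp_blit_handler(), and initializes the request-queue
| | * locks, the pop completion, and the bandwidth-release work and timer.
| | */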
| 1703 | int mdp3_ppp_res_init(struct msm_fb_data_type *mfd) |
| 1704 | { |
| 1705 | int rc; |
| 1706 | struct sched_param param = {.sched_priority = 16}; |
| 1707 | const char timeline_name[] = "mdp3_ppp"; |
| 1708 | |
| 1709 | ppp_stat = kzalloc(sizeof(struct ppp_status), GFP_KERNEL); |
| 1710 | if (!ppp_stat) |
| 1711 | return -ENOMEM; |
| 1712 | |
| 1713 | /* Set up the sync_pt timeline for PPP */
Arun kumar | 47145e0 | 2018-03-23 22:07:51 +0530 | [diff] [blame] | 1714 | ppp_stat->timeline = mdss_create_timeline(timeline_name); |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1715 | if (ppp_stat->timeline == NULL) {
| 1716 | pr_err("%s: cannot create timeline\n", __func__);
| | kfree(ppp_stat);
| | ppp_stat = NULL;
| 1717 | return -ENOMEM;
| 1718 | }
| 1719 | ppp_stat->timeline_value = 1; |
| 1720 | |
Arun kumar | 47145e0 | 2018-03-23 22:07:51 +0530 | [diff] [blame] | 1721 | kthread_init_worker(&ppp_stat->kworker); |
| 1722 | kthread_init_work(&ppp_stat->blit_work, mdp3_ppp_blit_handler); |
Sachin Bhayare | eeb8889 | 2018-01-02 16:36:01 +0530 | [diff] [blame] | 1723 | ppp_stat->blit_thread = kthread_run(kthread_worker_fn, |
| 1724 | &ppp_stat->kworker, |
| 1725 | "mdp3_ppp"); |
| 1726 | |
| 1727 | if (IS_ERR(ppp_stat->blit_thread)) { |
| 1728 | rc = PTR_ERR(ppp_stat->blit_thread); |
| 1729 | pr_err("ERROR: unable to start ppp blit thread,err = %d\n", |
| 1730 | rc); |
| 1731 | ppp_stat->blit_thread = NULL; |
| 1732 | return rc; |
| 1733 | } |
| 1734 | if (sched_setscheduler(ppp_stat->blit_thread, SCHED_FIFO, ¶m)) |
| 1735 | pr_warn("set priority failed for mdp3 blit thread\n"); |
| 1736 | |
| 1737 | INIT_WORK(&ppp_stat->free_bw_work, mdp3_free_bw_wq_handler); |
| 1738 | init_completion(&ppp_stat->pop_q_comp); |
| 1739 | mutex_init(&ppp_stat->req_mutex); |
| 1740 | mutex_init(&ppp_stat->config_ppp_mutex); |
| 1741 | init_timer(&ppp_stat->free_bw_timer); |
| 1742 | ppp_stat->free_bw_timer.function = mdp3_free_fw_timer_func; |
| 1743 | ppp_stat->free_bw_timer.data = 0; |
| 1744 | ppp_stat->mfd = mfd; |
| 1745 | mdp3_ppp_callback_setup(); |
| 1746 | return 0; |
| 1747 | } |
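| | 
| | /*
| | * Usage sketch (illustrative only, not compiled): the deferred-blit
| | * machinery above is the generic kthread_worker pattern from
| | * <linux/kthread.h>. A minimal, hedged example of the same pattern in
| | * isolation - the names worker, work, work_fn and "my_worker" below are
| | * placeholders, not part of this driver - would look like:
| | *
| | *	static struct kthread_worker worker;
| | *	static struct kthread_work work;
| | *	static struct task_struct *thread;
| | *
| | *	static void work_fn(struct kthread_work *w)
| | *	{
| | *		... runs in "my_worker" thread context, like
| | *		    mdp3_ppp_blit_handler() does above ...
| | *	}
| | *
| | *	kthread_init_worker(&worker);
| | *	kthread_init_work(&work, work_fn);
| | *	thread = kthread_run(kthread_worker_fn, &worker, "my_worker");
| | *
| | * and each producer would then call kthread_queue_work(&worker, &work),
| | * just as mdp3_ppp_parse_req() queues blit_work above.
| | */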