/* Copyright (c) 2007, 2013-2014, 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2007 Google Incorporated
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/file.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/fence.h>

#include "mdss_fb.h"
#include "mdp3_ppp.h"
#include "mdp3_hwio.h"
#include "mdp3.h"
#include "mdss_debug.h"
#include "mdss_sync.h"

#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
#define MDP_RELEASE_BW_TIMEOUT 50

#define MDP_PPP_MAX_BPP 4
#define MDP_PPP_DYNAMIC_FACTOR 3
#define MDP_PPP_MAX_READ_WRITE 3
#define MDP_PPP_MAX_WIDTH 0xFFF
#define ENABLE_SOLID_FILL 0x2
#define DISABLE_SOLID_FILL 0x0
#define BLEND_LATENCY 3
#define CSC_LATENCY 1

#define YUV_BW_FUDGE_NUM 10
#define YUV_BW_FUDGE_DEN 10

struct ppp_resource ppp_res;

static const bool valid_fmt[MDP_IMGTYPE_LIMIT] = {
	[MDP_RGB_565] = true,
	[MDP_BGR_565] = true,
	[MDP_RGB_888] = true,
	[MDP_BGR_888] = true,
	[MDP_BGRA_8888] = true,
	[MDP_RGBA_8888] = true,
	[MDP_ARGB_8888] = true,
	[MDP_XRGB_8888] = true,
	[MDP_RGBX_8888] = true,
	[MDP_Y_CRCB_H2V2] = true,
	[MDP_Y_CBCR_H2V2] = true,
	[MDP_Y_CBCR_H2V2_ADRENO] = true,
	[MDP_Y_CBCR_H2V2_VENUS] = true,
	[MDP_YCRYCB_H2V1] = true,
	[MDP_Y_CBCR_H2V1] = true,
	[MDP_Y_CRCB_H2V1] = true,
	[MDP_BGRX_8888] = true,
};

#define MAX_LIST_WINDOW 16
#define MDP3_PPP_MAX_LIST_REQ 8

struct blit_req_list {
	int count;
	struct mdp_blit_req req_list[MAX_LIST_WINDOW];
	struct mdp3_img_data src_data[MAX_LIST_WINDOW];
	struct mdp3_img_data dst_data[MAX_LIST_WINDOW];
	struct mdss_fence *acq_fen[MDP_MAX_FENCE_FD];
	u32 acq_fen_cnt;
	int cur_rel_fen_fd;
	struct sync_pt *cur_rel_sync_pt;
	struct mdss_fence *cur_rel_fence;
	struct mdss_fence *last_rel_fence;
};

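/*
 * Fixed-size circular queue of pending blit request lists: push_idx and
 * pop_idx wrap modulo MDP3_PPP_MAX_LIST_REQ and count tracks occupancy
 * (see mdp3_ppp_req_push()/mdp3_ppp_req_pop() below).
 */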
struct blit_req_queue {
	struct blit_req_list req[MDP3_PPP_MAX_LIST_REQ];
	int count;
	int push_idx;
	int pop_idx;
};

struct ppp_status {
	bool wait_for_pop;
	struct completion ppp_comp;
	struct completion pop_q_comp;
	struct mutex req_mutex; /* Protect request queue */
	struct mutex config_ppp_mutex; /* Only one client configures PPP registers at a time */
	struct msm_fb_data_type *mfd;

	struct kthread_work blit_work;
	struct kthread_worker kworker;
	struct task_struct *blit_thread;
	struct blit_req_queue req_q;

	struct mdss_timeline *timeline;

	int timeline_value;

	struct timer_list free_bw_timer;
	struct work_struct free_bw_work;
	bool bw_update;
	bool bw_on;
	u32 mdp_clk;
};

static struct ppp_status *ppp_stat;
static bool is_blit_optimization_possible(struct blit_req_list *req, int indx);

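/*
 * Scale @val by @numer/@denom using 64-bit math, e.g.
 * fudge_factor(100, 105, 100) == 105. Used to pad raw clock and bandwidth
 * estimates (CLK_FUDGE_NUM/DEN, YUV_BW_FUDGE_NUM/DEN) before they are voted.
 */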
static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
{
	u64 result = (val * (u64)numer);

	do_div(result, denom);
	return result;
}

int ppp_get_bpp(uint32_t format, uint32_t fb_format)
{
	int bpp = -EINVAL;

	if (format == MDP_FB_FORMAT)
		format = fb_format;

	bpp = ppp_bpp(format);
	if (bpp <= 0)
		pr_err("%s incorrect format %d\n", __func__, format);
	return bpp;
}

int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
		struct mdp3_img_data *data)
{
	struct msmfb_data fb_data;
	uint32_t stride;
	int bpp = ppp_bpp(img->format);

	if (bpp <= 0) {
		pr_err("%s incorrect format %d\n", __func__, img->format);
		return -EINVAL;
	}

	if (img->width > MDP_PPP_MAX_WIDTH) {
		pr_err("%s incorrect width %d\n", __func__, img->width);
		return -EINVAL;
	}

	fb_data.flags = img->priv;
	fb_data.memory_id = img->memory_id;
	fb_data.offset = 0;

	stride = img->width * bpp;
	data->padding = 16 * stride;

	return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP);
}

/* Check format */
int mdp3_ppp_verify_fmt(struct mdp_blit_req *req)
{
	if (MDP_IS_IMGTYPE_BAD(req->src.format) ||
	    MDP_IS_IMGTYPE_BAD(req->dst.format)) {
		pr_err("%s: Color format out of range\n", __func__);
		return -EINVAL;
	}

	if (!valid_fmt[req->src.format] ||
	    !valid_fmt[req->dst.format]) {
		pr_err("%s: Color format not supported\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* Check resolution */
int mdp3_ppp_verify_res(struct mdp_blit_req *req)
{
	if ((req->src.width == 0) || (req->src.height == 0) ||
	    (req->src_rect.w == 0) || (req->src_rect.h == 0) ||
	    (req->dst.width == 0) || (req->dst.height == 0) ||
	    (req->dst_rect.w == 0) || (req->dst_rect.h == 0)) {
		pr_err("%s: Height/width can't be 0\n", __func__);
		return -EINVAL;
	}

	if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
	    ((req->src_rect.y + req->src_rect.h) > req->src.height)) {
		pr_err("%s: src roi (x=%d,y=%d,w=%d, h=%d) WxH(%dx%d)\n",
			__func__, req->src_rect.x, req->src_rect.y,
			req->src_rect.w, req->src_rect.h, req->src.width,
			req->src.height);
		pr_err("%s: src roi larger than boundary\n", __func__);
		return -EINVAL;
	}

	if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) ||
	    ((req->dst_rect.y + req->dst_rect.h) > req->dst.height)) {
		pr_err("%s: dst roi (x=%d,y=%d,w=%d, h=%d) WxH(%dx%d)\n",
			__func__, req->dst_rect.x, req->dst_rect.y,
			req->dst_rect.w, req->dst_rect.h, req->dst.width,
			req->dst.height);
		pr_err("%s: dst roi larger than boundary\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* scaling range check */
int mdp3_ppp_verify_scale(struct mdp_blit_req *req)
{
	u32 src_width, src_height, dst_width, dst_height;

	src_width = req->src_rect.w;
	src_height = req->src_rect.h;

	if (req->flags & MDP_ROT_90) {
		dst_width = req->dst_rect.h;
		dst_height = req->dst_rect.w;
	} else {
		dst_width = req->dst_rect.w;
		dst_height = req->dst_rect.h;
	}

	switch (req->dst.format) {
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CBCR_H2V2:
		src_width = (src_width / 2) * 2;
		src_height = (src_height / 2) * 2;
		dst_width = (dst_width / 2) * 2;
		dst_height = (dst_height / 2) * 2;
		break;

	case MDP_Y_CRCB_H2V1:
	case MDP_Y_CBCR_H2V1:
	case MDP_YCRYCB_H2V1:
		src_width = (src_width / 2) * 2;
		dst_width = (dst_width / 2) * 2;
		break;

	default:
		break;
	}

	if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width >
	     MDP_MAX_X_SCALE_FACTOR)
	    || ((MDP_SCALE_Q_FACTOR * dst_width) / src_width <
	     MDP_MIN_X_SCALE_FACTOR)) {
		pr_err("%s: x req scale factor beyond capability\n", __func__);
		return -EINVAL;
	}

	if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height >
	     MDP_MAX_Y_SCALE_FACTOR)
	    || ((MDP_SCALE_Q_FACTOR * dst_height) / src_height <
	     MDP_MIN_Y_SCALE_FACTOR)) {
		pr_err("%s: y req scale factor beyond capability\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/* operation check */
int mdp3_ppp_verify_op(struct mdp_blit_req *req)
{
	/*
	 * The MDP_DEINTERLACE & MDP_SHARPENING flags are not valid for MDP3,
	 * so the two are used together to mean MDP_SMART_BLIT.
	 */
	if ((req->flags & MDP_SMART_BLIT) == MDP_SMART_BLIT)
		return 0;
	if (req->flags & MDP_DEINTERLACE) {
		pr_err("\n%s(): deinterlace not supported", __func__);
		return -EINVAL;
	}

	if (req->flags & MDP_SHARPENING) {
		pr_err("\n%s(): sharpening not supported", __func__);
		return -EINVAL;
	}
	return 0;
}

int mdp3_ppp_verify_req(struct mdp_blit_req *req)
{
	int rc;

	if (req == NULL) {
		pr_err("%s: req == null\n", __func__);
		return -EINVAL;
	}

	rc = mdp3_ppp_verify_fmt(req);
	rc |= mdp3_ppp_verify_res(req);
	rc |= mdp3_ppp_verify_scale(req);
	rc |= mdp3_ppp_verify_op(req);

	return rc;
}

int mdp3_ppp_pipe_wait(void)
{
	int ret = 1;

	/*
	 * wait 200 ms for the ppp operation to complete before declaring
	 * the MDP hung
	 */
	ret = wait_for_completion_timeout(
		&ppp_stat->ppp_comp, msecs_to_jiffies(200));
	if (!ret)
		pr_err("%s: Timed out waiting for the MDP.\n",
			__func__);

	return ret;
}

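/*
 * Convert the user-supplied transparent color key into the 24bpp value the
 * PPP hardware compares against. For RGB565 sources each 5/6-bit component
 * is expanded by bit replication (a 5-bit value v becomes (v << 3) | (v >> 2));
 * for 24-bit sources the two low bytes are swapped (RGB to RBG).
 */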
uint32_t mdp3_calc_tpval(struct ppp_img_desc *img, uint32_t old_tp)
{
	uint32_t tpVal;
	uint8_t plane_tp;

	tpVal = 0;
	if ((img->color_fmt == MDP_RGB_565)
	    || (img->color_fmt == MDP_BGR_565)) {
		/* transparent color conversion into 24 bpp */
		plane_tp = (uint8_t) ((old_tp & 0xF800) >> 11);
		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 16;
		plane_tp = (uint8_t) (old_tp & 0x1F);
		tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 8;

		plane_tp = (uint8_t) ((old_tp & 0x7E0) >> 5);
		tpVal |= ((plane_tp << 2) | ((plane_tp & 0x30) >> 4));
	} else {
		/* 24bit RGB to RBG conversion */
		tpVal = (old_tp & 0xFF00) >> 8;
		tpVal |= (old_tp & 0xFF) << 8;
		tpVal |= (old_tp & 0xFF0000);
	}

	return tpVal;
}

static void mdp3_ppp_intr_handler(int type, void *arg)
{
	complete(&ppp_stat->ppp_comp);
}

static int mdp3_ppp_callback_setup(void)
{
	int rc;
	struct mdp3_intr_cb ppp_done_cb = {
		.cb = mdp3_ppp_intr_handler,
		.data = NULL,
	};

	rc = mdp3_set_intr_callback(MDP3_PPP_DONE, &ppp_done_cb);
	return rc;
}

void mdp3_ppp_kickoff(void)
{
	init_completion(&ppp_stat->ppp_comp);
	mdp3_irq_enable(MDP3_PPP_DONE);
	ppp_enable();
	ATRACE_BEGIN("mdp3_wait_for_ppp_comp");
	mdp3_ppp_pipe_wait();
	ATRACE_END("mdp3_wait_for_ppp_comp");
	mdp3_irq_disable(MDP3_PPP_DONE);
}

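/*
 * Bytes-per-pixel are tracked as a bpp_num/bpp_den fraction so that planar
 * YUV formats can be represented exactly (4:2:0 formats average 3/2 bytes
 * per pixel); bpp_pln carries the bytes per pixel of the primary plane and
 * is what gets passed to mdp3_adjust_scale_factor().
 */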
struct bpp_info {
	int bpp_num;
	int bpp_den;
	int bpp_pln;
};

int mdp3_get_bpp_info(int format, struct bpp_info *bpp)
{
	int rc = 0;

	switch (format) {
	case MDP_RGB_565:
	case MDP_BGR_565:
		bpp->bpp_num = 2;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 2;
		break;
	case MDP_RGB_888:
	case MDP_BGR_888:
		bpp->bpp_num = 3;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 3;
		break;
	case MDP_BGRA_8888:
	case MDP_RGBA_8888:
	case MDP_ARGB_8888:
	case MDP_XRGB_8888:
	case MDP_RGBX_8888:
	case MDP_BGRX_8888:
		bpp->bpp_num = 4;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 4;
		break;
	case MDP_Y_CRCB_H2V2:
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V2_ADRENO:
	case MDP_Y_CBCR_H2V2_VENUS:
		bpp->bpp_num = 3;
		bpp->bpp_den = 2;
		bpp->bpp_pln = 1;
		break;
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
		bpp->bpp_num = 2;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 1;
		break;
	case MDP_YCRYCB_H2V1:
		bpp->bpp_num = 2;
		bpp->bpp_den = 1;
		bpp->bpp_pln = 2;
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

bool mdp3_is_blend(struct mdp_blit_req *req)
{
	if ((req->transp_mask != MDP_TRANSP_NOP) ||
	    (req->alpha < MDP_ALPHA_NOP) ||
	    (req->src.format == MDP_ARGB_8888) ||
	    (req->src.format == MDP_BGRA_8888) ||
	    (req->src.format == MDP_RGBA_8888))
		return true;
	return false;
}

bool mdp3_is_scale(struct mdp_blit_req *req)
{
	if (req->flags & MDP_ROT_90) {
		if (req->src_rect.w != req->dst_rect.h ||
		    req->src_rect.h != req->dst_rect.w)
			return true;
	} else {
		if (req->src_rect.h != req->dst_rect.h ||
		    req->src_rect.w != req->dst_rect.w)
			return true;
	}
	return false;
}

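/*
 * Estimate the MDP core clock needed for a request list: each layer's source
 * pixel rate (src w * h * fps) is weighted by the worst of the downscale
 * ratio, blend latency and CSC latency (all expressed in percent), then the
 * solid fill pixel rate and the clock fudge factor are applied on top.
 */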
u32 mdp3_clk_calc(struct msm_fb_data_type *mfd,
		struct blit_req_list *lreq, u32 fps)
{
	int i, lcount = 0;
	struct mdp_blit_req *req;
	u64 mdp_clk_rate = 0;
	u32 scale_x = 0, scale_y = 0, scale = 0;
	u32 blend_l, csc_l;

	lcount = lreq->count;

	blend_l = 100 * BLEND_LATENCY;
	csc_l = 100 * CSC_LATENCY;

	for (i = 0; i < lcount; i++) {
		req = &(lreq->req_list[i]);

		if (req->flags & MDP_SMART_BLIT)
			continue;

		if (mdp3_is_scale(req)) {
			if (req->flags & MDP_ROT_90) {
				scale_x = 100 * req->src_rect.h /
					req->dst_rect.w;
				scale_y = 100 * req->src_rect.w /
					req->dst_rect.h;
			} else {
				scale_x = 100 * req->src_rect.w /
					req->dst_rect.w;
				scale_y = 100 * req->src_rect.h /
					req->dst_rect.h;
			}
			scale = max(scale_x, scale_y);
		}
		scale = scale >= 100 ? scale : 100;
		if (mdp3_is_blend(req))
			scale = max(scale, blend_l);

		if (!check_if_rgb(req->src.format))
			scale = max(scale, csc_l);

		mdp_clk_rate += (req->src_rect.w * req->src_rect.h *
			scale / 100) * fps;
	}
	mdp_clk_rate += (ppp_res.solid_fill_pixel * fps);
	mdp_clk_rate = fudge_factor(mdp_clk_rate,
			CLK_FUDGE_NUM, CLK_FUDGE_DEN);
	pr_debug("mdp_clk_rate for ppp = %llu\n", mdp_clk_rate);
	mdp_clk_rate = mdp3_clk_round_off(mdp_clk_rate);

	return mdp_clk_rate;
}

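/*
 * Pad a layer's read/write bandwidth estimate when it is scaled: the 1:1
 * (or pure rotate) case is returned unchanged, otherwise the estimate is
 * inflated in proportion to the dst/src height and width ratios to cover
 * the extra fetches the scaler needs.
 */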
u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp)
{
	int src_h, src_w;
	int dst_h, dst_w;

	src_h = req->src_rect.h;
	src_w = req->src_rect.w;

	dst_h = req->dst_rect.h;
	dst_w = req->dst_rect.w;

	if ((!(req->flags & MDP_ROT_90) && src_h == dst_h &&
	     src_w == dst_w) || ((req->flags & MDP_ROT_90) &&
	     src_h == dst_w && src_w == dst_h))
		return bw_req;

	bw_req = (bw_req + (bw_req * dst_h) / (4 * src_h));
	bw_req = (bw_req + (bw_req * dst_w) / (4 * src_w) +
			(bw_req * dst_w) / (bpp * src_w));
	return bw_req;
}

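/*
 * Work out the bus bandwidth (ab/ib) and clock votes for a request list:
 * sum source read, background read and destination write bytes per layer
 * (smart-blit BG layers are skipped and their cached read BW is charged
 * against the FG layer), scale by the effective fps, and stash the result
 * in ppp_res for mdp3_ppp_turnon() and the blit handler to apply.
 */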
int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd,
		struct blit_req_list *lreq)
{
	struct mdss_panel_info *panel_info = mfd->panel_info;
	int i, lcount = 0;
	int frame_rate = DEFAULT_FRAME_RATE;
	struct mdp_blit_req *req;
	struct bpp_info bpp;
	u64 old_solid_fill_pixel = 0;
	u64 new_solid_fill_pixel = 0;
	u64 src_read_bw = 0;
	u32 bg_read_bw = 0;
	u32 dst_write_bw = 0;
	u64 honest_ppp_ab = 0;
	u32 fps = 0;
	int smart_blit_fg_indx = -1;
	u32 smart_blit_bg_read_bw = 0;

	ATRACE_BEGIN(__func__);
	lcount = lreq->count;
	frame_rate = mdss_panel_get_framerate(panel_info, FPS_RESOLUTION_HZ);
	if (lcount == 0) {
		pr_err("Blit with request count 0, continue to recover!!!\n");
		ATRACE_END(__func__);
		return 0;
	}
	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
		req = &(lreq->req_list[0]);
		mdp3_get_bpp_info(req->dst.format, &bpp);
		old_solid_fill_pixel = ppp_res.solid_fill_pixel;
		new_solid_fill_pixel = req->dst_rect.w * req->dst_rect.h;
		ppp_res.solid_fill_pixel += new_solid_fill_pixel;
		ppp_res.solid_fill_byte += req->dst_rect.w * req->dst_rect.h *
			bpp.bpp_num / bpp.bpp_den;
		if ((old_solid_fill_pixel >= new_solid_fill_pixel) ||
		    (mdp3_res->solid_fill_vote_en)) {
			pr_debug("Last fill pixels are higher or fill_en %d\n",
				mdp3_res->solid_fill_vote_en);
			ATRACE_END(__func__);
			return 0;
		}
	}

	for (i = 0; i < lcount; i++) {
		/* Set Smart blit flag before BW calculation */
		is_blit_optimization_possible(lreq, i);
		req = &(lreq->req_list[i]);

		if (req->fps > 0 && req->fps <= frame_rate) {
			if (fps == 0)
				fps = req->fps;
			else
				fps = frame_rate;
		}

		mdp3_get_bpp_info(req->src.format, &bpp);
		if (lreq->req_list[i].flags & MDP_SMART_BLIT) {
			/*
			 * Flag for smart blit FG layer index
			 * If blit request at index "n" has
			 * MDP_SMART_BLIT flag set then it will be used as BG
			 * layer in smart blit and request at index "n+1"
			 * will be used as FG layer
			 */
			smart_blit_fg_indx = i + 1;
			bg_read_bw = req->src_rect.w * req->src_rect.h *
				bpp.bpp_num / bpp.bpp_den;
			bg_read_bw = mdp3_adjust_scale_factor(req,
				bg_read_bw, bpp.bpp_pln);
			/* Cache read BW of smart blit BG layer */
			smart_blit_bg_read_bw = bg_read_bw;
		} else {
			src_read_bw = req->src_rect.w * req->src_rect.h *
				bpp.bpp_num / bpp.bpp_den;
			src_read_bw = mdp3_adjust_scale_factor(req,
				src_read_bw, bpp.bpp_pln);
			if (!(check_if_rgb(req->src.format))) {
				src_read_bw = fudge_factor(src_read_bw,
					YUV_BW_FUDGE_NUM,
					YUV_BW_FUDGE_DEN);
			}
			mdp3_get_bpp_info(req->dst.format, &bpp);

			if (smart_blit_fg_indx == i) {
				bg_read_bw = smart_blit_bg_read_bw;
				smart_blit_fg_indx = -1;
			} else {
				if ((req->transp_mask != MDP_TRANSP_NOP) ||
				    (req->alpha < MDP_ALPHA_NOP) ||
				    (req->src.format == MDP_ARGB_8888) ||
				    (req->src.format == MDP_BGRA_8888) ||
				    (req->src.format == MDP_RGBA_8888)) {
					bg_read_bw = req->dst_rect.w *
						req->dst_rect.h *
						bpp.bpp_num / bpp.bpp_den;
					bg_read_bw = mdp3_adjust_scale_factor(
						req, bg_read_bw,
						bpp.bpp_pln);
				} else {
					bg_read_bw = 0;
				}
			}
			dst_write_bw = req->dst_rect.w * req->dst_rect.h *
				bpp.bpp_num / bpp.bpp_den;
			honest_ppp_ab += (src_read_bw + bg_read_bw +
				dst_write_bw);
		}
	}

	if (fps == 0)
		fps = frame_rate;

	if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
		honest_ppp_ab = ppp_res.solid_fill_byte * 4;
		pr_debug("solid fill honest_ppp_ab %llu\n", honest_ppp_ab);
	} else {
		honest_ppp_ab += ppp_res.solid_fill_byte;
		mdp3_res->solid_fill_vote_en = true;
	}

	honest_ppp_ab = honest_ppp_ab * fps;
	if (honest_ppp_ab != ppp_res.next_ab) {
		ppp_res.next_ab = honest_ppp_ab;
		ppp_res.next_ib = honest_ppp_ab;
		ppp_stat->bw_update = true;
		pr_debug("solid fill ab = %llx, total ab = %llx ",
			(ppp_res.solid_fill_byte * fps), honest_ppp_ab);
		pr_debug("(%d fps) Solid_fill_vote %d\n",
			fps, mdp3_res->solid_fill_vote_en);
		ATRACE_INT("mdp3_ppp_bus_quota", honest_ppp_ab);
	}
	ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq, fps);
	ATRACE_INT("mdp3_ppp_clk_rate", ppp_res.clk_rate);
	ATRACE_END(__func__);
	return 0;
}

int mdp3_ppp_turnon(struct msm_fb_data_type *mfd, int on_off)
{
	uint64_t ab = 0, ib = 0;
	int rate = 0;
	int rc;

	if (on_off) {
		rate = ppp_res.clk_rate;
		ab = ppp_res.next_ab;
		ib = ppp_res.next_ib;
	}
	mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, rate, MDP3_CLIENT_PPP);
	rc = mdp3_res_update(on_off, 0, MDP3_CLIENT_PPP);
	if (rc < 0) {
		pr_err("%s: mdp3_clk_enable failed\n", __func__);
		return rc;
	}
	rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP, ab, ib);
	if (rc < 0) {
		mdp3_res_update(!on_off, 0, MDP3_CLIENT_PPP);
		pr_err("%s: scale_set_quota failed\n", __func__);
		return rc;
	}
	ppp_stat->bw_on = on_off;
	ppp_stat->mdp_clk = MDP_CORE_CLK_RATE_SVS;
	ppp_stat->bw_update = false;
	return 0;
}

void mdp3_start_ppp(struct ppp_blit_op *blit_op)
{
	/* Wait for the pipe to clear */
	if (MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS) &
	    MDP3_PPP_ACTIVE) {
		pr_err("ppp core is hung up on previous request\n");
		return;
	}
	config_ppp_op_mode(blit_op);
	if (blit_op->solid_fill) {
		MDP3_REG_WRITE(0x10138, 0x10000000);
		MDP3_REG_WRITE(0x1014c, 0xffffffff);
		MDP3_REG_WRITE(0x101b8, 0);
		MDP3_REG_WRITE(0x101bc, 0);
		MDP3_REG_WRITE(0x1013c, 0);
		MDP3_REG_WRITE(0x10140, 0);
		MDP3_REG_WRITE(0x10144, 0);
		MDP3_REG_WRITE(0x10148, 0);
		MDP3_REG_WRITE(MDP3_TFETCH_FILL_COLOR,
			blit_op->solid_fill_color);
		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
			ENABLE_SOLID_FILL);
	} else {
		MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
			DISABLE_SOLID_FILL);
	}
	/* Skip PPP kickoff for SMART_BLIT BG layer */
	if (blit_op->mdp_op & MDPOP_SMART_BLIT)
		pr_debug("Skip mdp3_ppp_kickoff\n");
	else
		mdp3_ppp_kickoff();

	if (!(blit_op->solid_fill)) {
		ppp_res.solid_fill_pixel = 0;
		ppp_res.solid_fill_byte = 0;
	}
}

static int solid_fill_workaround(struct mdp_blit_req *req,
		struct ppp_blit_op *blit_op)
{
	/* Make width 2 when there is a solid fill of width 1, and make
	 * sure width does not become zero while trying to avoid odd width
	 */
	if (blit_op->dst.roi.width == 1) {
		if (req->dst_rect.x + 2 > req->dst.width) {
			pr_err("%s: Unable to handle solid fill of width 1",
				__func__);
			return -EINVAL;
		}
		blit_op->dst.roi.width = 2;
	}
	if (blit_op->src.roi.width == 1) {
		if (req->src_rect.x + 2 > req->src.width) {
			pr_err("%s: Unable to handle solid fill of width 1",
				__func__);
			return -EINVAL;
		}
		blit_op->src.roi.width = 2;
	}

	/* Avoid odd width, as it could hang ppp during solid fill */
	blit_op->dst.roi.width = (blit_op->dst.roi.width / 2) * 2;
	blit_op->src.roi.width = (blit_op->src.roi.width / 2) * 2;

	/* Set src format to RGBX, to avoid ppp hang issues */
	blit_op->src.color_fmt = MDP_RGBX_8888;

	/* Avoid RGBA format, as it could hang ppp during solid fill */
	if (blit_op->dst.color_fmt == MDP_RGBA_8888)
		blit_op->dst.color_fmt = MDP_RGBX_8888;
	return 0;
}

static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op,
	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
	struct mdp3_img_data *dst_data)
{
	unsigned long srcp0_start, srcp0_len, dst_start, dst_len;
	uint32_t dst_width, dst_height;
	int ret = 0;

	srcp0_start = (unsigned long) src_data->addr;
	srcp0_len = (unsigned long) src_data->len;
	dst_start = (unsigned long) dst_data->addr;
	dst_len = (unsigned long) dst_data->len;

	blit_op->dst.prop.width = req->dst.width;
	blit_op->dst.prop.height = req->dst.height;

	blit_op->dst.color_fmt = req->dst.format;
	blit_op->dst.p0 = (void *) dst_start;
	blit_op->dst.p0 += req->dst.offset;

	blit_op->dst.roi.x = req->dst_rect.x;
	blit_op->dst.roi.y = req->dst_rect.y;
	blit_op->dst.roi.width = req->dst_rect.w;
	blit_op->dst.roi.height = req->dst_rect.h;

	blit_op->src.roi.x = req->src_rect.x;
	blit_op->src.roi.y = req->src_rect.y;
	blit_op->src.roi.width = req->src_rect.w;
	blit_op->src.roi.height = req->src_rect.h;

	blit_op->src.prop.width = req->src.width;
	blit_op->src.prop.height = req->src.height;
	blit_op->src.color_fmt = req->src.format;

	blit_op->src.p0 = (void *) (srcp0_start + req->src.offset);
	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO)
		blit_op->src.p1 =
			(void *) ((uint32_t) blit_op->src.p0 +
				ALIGN((ALIGN(req->src.width, 32) *
				ALIGN(req->src.height, 32)), 4096));
	else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS)
		blit_op->src.p1 =
			(void *) ((uint32_t) blit_op->src.p0 +
				ALIGN((ALIGN(req->src.width, 128) *
				ALIGN(req->src.height, 32)), 4096));
	else
		blit_op->src.p1 = (void *) ((uint32_t) blit_op->src.p0 +
			req->src.width * req->src.height);

	if (req->flags & MDP_IS_FG)
		blit_op->mdp_op |= MDPOP_LAYER_IS_FG;

	/* blending check */
	if (req->transp_mask != MDP_TRANSP_NOP) {
		blit_op->mdp_op |= MDPOP_TRANSP;
		blit_op->blend.trans_color =
			mdp3_calc_tpval(&blit_op->src, req->transp_mask);
	} else {
		blit_op->blend.trans_color = 0;
	}

	req->alpha &= 0xff;
	if (req->alpha < MDP_ALPHA_NOP) {
		blit_op->mdp_op |= MDPOP_ALPHAB;
		blit_op->blend.const_alpha = req->alpha;
	} else {
		blit_op->blend.const_alpha = 0xff;
	}

	/* rotation check */
	if (req->flags & MDP_FLIP_LR)
		blit_op->mdp_op |= MDPOP_LR;
	if (req->flags & MDP_FLIP_UD)
		blit_op->mdp_op |= MDPOP_UD;
	if (req->flags & MDP_ROT_90)
		blit_op->mdp_op |= MDPOP_ROT90;
	if (req->flags & MDP_DITHER)
		blit_op->mdp_op |= MDPOP_DITHER;

	if (req->flags & MDP_BLEND_FG_PREMULT)
		blit_op->mdp_op |= MDPOP_FG_PM_ALPHA;

	/* scale check */
	if (req->flags & MDP_ROT_90) {
		dst_width = req->dst_rect.h;
		dst_height = req->dst_rect.w;
	} else {
		dst_width = req->dst_rect.w;
		dst_height = req->dst_rect.h;
	}

	if ((blit_op->src.roi.width != dst_width) ||
	    (blit_op->src.roi.height != dst_height))
		blit_op->mdp_op |= MDPOP_ASCALE;

	if (req->flags & MDP_BLUR)
		blit_op->mdp_op |= MDPOP_ASCALE | MDPOP_BLUR;

	if (req->flags & MDP_SOLID_FILL) {
		ret = solid_fill_workaround(req, blit_op);
		if (ret)
			return ret;

		blit_op->solid_fill_color = (req->const_color.g & 0xFF) |
			(req->const_color.r & 0xFF) << 8 |
			(req->const_color.b & 0xFF) << 16 |
			(req->const_color.alpha & 0xFF) << 24;
		blit_op->solid_fill = true;
	} else {
		blit_op->solid_fill = false;
	}

	if (req->flags & MDP_SMART_BLIT)
		blit_op->mdp_op |= MDPOP_SMART_BLIT;

	return ret;
}

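/*
 * bg tile fetching HW workaround: narrow rotated blends are processed as
 * 16-pixel-high destination strips, each kicked off as its own PPP pass,
 * with the source ROI advanced proportionally and a final pass for the
 * remainder. The strip's source width is re-clamped so every pass stays
 * within MDP_MIN/MAX_X_SCALE_FACTOR.
 */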
static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
		struct mdp_blit_req *req)
{
	int dst_h, src_w, i;
	uint32_t mdp_op = blit_op->mdp_op;
	void *src_p0 = blit_op->src.p0;
	void *src_p1 = blit_op->src.p1;
	void *dst_p0 = blit_op->dst.p0;

	src_w = req->src_rect.w;
	dst_h = blit_op->dst.roi.height;
	/* bg tile fetching HW workaround */
	for (i = 0; i < (req->dst_rect.h / 16); i++) {
		/* this tile size */
		blit_op->dst.roi.height = 16;
		blit_op->src.roi.width =
			(16 * req->src_rect.w) / req->dst_rect.h;

		/* if it's out of scale range... */
		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
		     blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR)
			blit_op->src.roi.width =
				(MDP_SCALE_Q_FACTOR *
				blit_op->dst.roi.height) /
				MDP_MAX_X_SCALE_FACTOR;
		else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
			  blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR)
			blit_op->src.roi.width =
				(MDP_SCALE_Q_FACTOR *
				blit_op->dst.roi.height) /
				MDP_MIN_X_SCALE_FACTOR;

		mdp3_start_ppp(blit_op);

		/* next tile location */
		blit_op->dst.roi.y += 16;
		blit_op->src.roi.x += blit_op->src.roi.width;

		/* this is for a remainder update */
		dst_h -= 16;
		src_w -= blit_op->src.roi.width;
		/* restore parameters that may have been overwritten */
		blit_op->mdp_op = mdp_op;
		blit_op->src.p0 = src_p0;
		blit_op->src.p1 = src_p1;
		blit_op->dst.p0 = dst_p0;
	}

	if ((dst_h < 0) || (src_w < 0))
		pr_err("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
			__LINE__);

	/* remainder update */
	if ((dst_h > 0) && (src_w > 0)) {
		u32 tmp_v;

		blit_op->dst.roi.height = dst_h;
		blit_op->src.roi.width = src_w;

		if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
		     blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
			tmp_v = (MDP_SCALE_Q_FACTOR *
				blit_op->dst.roi.height) /
				MDP_MAX_X_SCALE_FACTOR +
				((MDP_SCALE_Q_FACTOR *
				blit_op->dst.roi.height) %
				MDP_MAX_X_SCALE_FACTOR ? 1 : 0);

			/* move x location as roi width gets bigger */
			blit_op->src.roi.x -= tmp_v - blit_op->src.roi.width;
			blit_op->src.roi.width = tmp_v;
		} else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
			    blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
			tmp_v = (MDP_SCALE_Q_FACTOR *
				blit_op->dst.roi.height) /
				MDP_MIN_X_SCALE_FACTOR +
				((MDP_SCALE_Q_FACTOR *
				blit_op->dst.roi.height) %
				MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
			/*
			 * we don't move x location for continuity of
			 * source image
			 */
			blit_op->src.roi.width = tmp_v;
		}

		mdp3_start_ppp(blit_op);
	}
}

static int mdp3_ppp_blit(struct msm_fb_data_type *mfd,
	struct mdp_blit_req *req, struct mdp3_img_data *src_data,
	struct mdp3_img_data *dst_data)
{
	struct ppp_blit_op blit_op;
	int ret = 0;

	memset(&blit_op, 0, sizeof(blit_op));

	if (req->dst.format == MDP_FB_FORMAT)
		req->dst.format = mfd->fb_imgType;
	if (req->src.format == MDP_FB_FORMAT)
		req->src.format = mfd->fb_imgType;

	if (mdp3_ppp_verify_req(req)) {
		pr_err("%s: invalid image!\n", __func__);
		return -EINVAL;
	}

	ret = mdp3_ppp_process_req(&blit_op, req, src_data, dst_data);
	if (ret) {
		pr_err("%s: Failed to process the blit request", __func__);
		return ret;
	}

	if (((blit_op.mdp_op & (MDPOP_TRANSP | MDPOP_ALPHAB)) ||
	     (req->src.format == MDP_ARGB_8888) ||
	     (req->src.format == MDP_BGRA_8888) ||
	     (req->src.format == MDP_RGBA_8888)) &&
	    (blit_op.mdp_op & MDPOP_ROT90) && (req->dst_rect.w <= 16)) {
		mdp3_ppp_tile_workaround(&blit_op, req);
	} else {
		mdp3_start_ppp(&blit_op);
	}

	return 0;
}

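/*
 * MDP width split workaround: a 4bpp destination whose width leaves a
 * remainder of 6 or 14 modulo 16 is handled by splitting the request into
 * two blits along the destination width (the source rect is split in
 * proportion) and submitting each half separately.
 */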
static int mdp3_ppp_blit_workaround(struct msm_fb_data_type *mfd,
	struct mdp_blit_req *req, unsigned int remainder,
	struct mdp3_img_data *src_data,
	struct mdp3_img_data *dst_data)
{
	int ret;
	struct mdp_blit_req splitreq;
	int s_x_0, s_x_1, s_w_0, s_w_1, s_y_0, s_y_1, s_h_0, s_h_1;
	int d_x_0, d_x_1, d_w_0, d_w_1, d_y_0, d_y_1, d_h_0, d_h_1;

	/* make new request as provided by user */
	splitreq = *req;

	/* break dest roi at width */
	d_y_0 = d_y_1 = req->dst_rect.y;
	d_h_0 = d_h_1 = req->dst_rect.h;
	d_x_0 = req->dst_rect.x;

	if (remainder == 14 || remainder == 6)
		d_w_1 = req->dst_rect.w / 2;
	else
		d_w_1 = (req->dst_rect.w - 1) / 2 - 1;

	d_w_0 = req->dst_rect.w - d_w_1;
	d_x_1 = d_x_0 + d_w_0;
	/* blit first region */
	if (((splitreq.flags & 0x07) == 0x07) ||
	    ((splitreq.flags & 0x07) == 0x05) ||
	    ((splitreq.flags & 0x07) == 0x02) ||
	    ((splitreq.flags & 0x07) == 0x0)) {

		if (splitreq.flags & MDP_ROT_90) {
			s_x_0 = s_x_1 = req->src_rect.x;
			s_w_0 = s_w_1 = req->src_rect.w;
			s_y_0 = req->src_rect.y;
			s_h_1 = (req->src_rect.h * d_w_1) /
				req->dst_rect.w;
			s_h_0 = req->src_rect.h - s_h_1;
			s_y_1 = s_y_0 + s_h_0;
			if (d_w_1 >= 8 * s_h_1) {
				s_h_1++;
				s_y_1--;
			}
		} else {
			s_y_0 = s_y_1 = req->src_rect.y;
			s_h_0 = s_h_1 = req->src_rect.h;
			s_x_0 = req->src_rect.x;
			s_w_1 = (req->src_rect.w * d_w_1) /
				req->dst_rect.w;
			s_w_0 = req->src_rect.w - s_w_1;
			s_x_1 = s_x_0 + s_w_0;
			if (d_w_1 >= 8 * s_w_1) {
				s_w_1++;
				s_x_1--;
			}
		}

		splitreq.src_rect.h = s_h_0;
		splitreq.src_rect.y = s_y_0;
		splitreq.dst_rect.h = d_h_0;
		splitreq.dst_rect.y = d_y_0;
		splitreq.src_rect.x = s_x_0;
		splitreq.src_rect.w = s_w_0;
		splitreq.dst_rect.x = d_x_0;
		splitreq.dst_rect.w = d_w_0;
	} else {
		if (splitreq.flags & MDP_ROT_90) {
			s_x_0 = s_x_1 = req->src_rect.x;
			s_w_0 = s_w_1 = req->src_rect.w;
			s_y_0 = req->src_rect.y;
			s_h_1 = (req->src_rect.h * d_w_0) /
				req->dst_rect.w;
			s_h_0 = req->src_rect.h - s_h_1;
			s_y_1 = s_y_0 + s_h_0;
			if (d_w_0 >= 8 * s_h_1) {
				s_h_1++;
				s_y_1--;
			}
		} else {
			s_y_0 = s_y_1 = req->src_rect.y;
			s_h_0 = s_h_1 = req->src_rect.h;
			s_x_0 = req->src_rect.x;
			s_w_1 = (req->src_rect.w * d_w_0) /
				req->dst_rect.w;
			s_w_0 = req->src_rect.w - s_w_1;
			s_x_1 = s_x_0 + s_w_0;
			if (d_w_0 >= 8 * s_w_1) {
				s_w_1++;
				s_x_1--;
			}
		}
		splitreq.src_rect.h = s_h_0;
		splitreq.src_rect.y = s_y_0;
		splitreq.dst_rect.h = d_h_1;
		splitreq.dst_rect.y = d_y_1;
		splitreq.src_rect.x = s_x_0;
		splitreq.src_rect.w = s_w_0;
		splitreq.dst_rect.x = d_x_1;
		splitreq.dst_rect.w = d_w_1;
	}

	/* No need to split in height */
	ret = mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);

	if (ret)
		return ret;
	/* blit second region */
	if (((splitreq.flags & 0x07) == 0x07) ||
	    ((splitreq.flags & 0x07) == 0x05) ||
	    ((splitreq.flags & 0x07) == 0x02) ||
	    ((splitreq.flags & 0x07) == 0x0)) {
		splitreq.src_rect.h = s_h_1;
		splitreq.src_rect.y = s_y_1;
		splitreq.dst_rect.h = d_h_1;
		splitreq.dst_rect.y = d_y_1;
		splitreq.src_rect.x = s_x_1;
		splitreq.src_rect.w = s_w_1;
		splitreq.dst_rect.x = d_x_1;
		splitreq.dst_rect.w = d_w_1;
	} else {
		splitreq.src_rect.h = s_h_1;
		splitreq.src_rect.y = s_y_1;
		splitreq.dst_rect.h = d_h_0;
		splitreq.dst_rect.y = d_y_0;
		splitreq.src_rect.x = s_x_1;
		splitreq.src_rect.w = s_w_1;
		splitreq.dst_rect.x = d_x_0;
		splitreq.dst_rect.w = d_w_0;
	}

	/* No need to split in height ... just width */
	return mdp3_ppp_blit(mfd, &splitreq, src_data, dst_data);
}

int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd,
	struct mdp_blit_req *req,
	struct mdp3_img_data *src_data,
	struct mdp3_img_data *dst_data)
{
	int ret;
	unsigned int remainder = 0, is_bpp_4 = 0;

	if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
		pr_err("mdp_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
		return 0;

	/* MDP width split workaround */
	remainder = (req->dst_rect.w) % 16;
	ret = ppp_get_bpp(req->dst.format, mfd->fb_imgType);
	if (ret <= 0) {
		pr_err("mdp_ppp: incorrect bpp!\n");
		return -EINVAL;
	}
	is_bpp_4 = (ret == 4) ? 1 : 0;

	if ((is_bpp_4 && (remainder == 6 || remainder == 14)) &&
	    !(req->flags & MDP_SOLID_FILL))
		ret = mdp3_ppp_blit_workaround(mfd, req, remainder,
			src_data, dst_data);
	else
		ret = mdp3_ppp_blit(mfd, req, src_data, dst_data);
	return ret;
}

void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
{
	int i, ret = 0;

	ATRACE_BEGIN(__func__);
	/* buf sync */
	for (i = 0; i < req->acq_fen_cnt; i++) {
		ret = mdss_wait_sync_fence(req->acq_fen[i],
			WAIT_FENCE_FINAL_TIMEOUT);
		if (ret < 0) {
			pr_err("%s: sync_fence_wait failed! ret = %x\n",
				__func__, ret);
			break;
		}
		mdss_put_sync_fence(req->acq_fen[i]);
	}
	ATRACE_END(__func__);
	if (ret < 0) {
		while (i < req->acq_fen_cnt) {
			mdss_put_sync_fence(req->acq_fen[i]);
			i++;
		}
	}
	req->acq_fen_cnt = 0;
}

void mdp3_ppp_signal_timeline(struct blit_req_list *req)
{
	mdss_inc_timeline(ppp_stat->timeline, 1);
	MDSS_XLOG(ppp_stat->timeline->value, ppp_stat->timeline_value);
	req->last_rel_fence = req->cur_rel_fence;
	req->cur_rel_fence = NULL;
}

static void mdp3_ppp_deinit_buf_sync(struct blit_req_list *req)
{
	int i;

	put_unused_fd(req->cur_rel_fen_fd);
	mdss_put_sync_fence(req->cur_rel_fence);
	req->cur_rel_fence = NULL;
	req->cur_rel_fen_fd = 0;
	ppp_stat->timeline_value--;
	for (i = 0; i < req->acq_fen_cnt; i++)
		mdss_put_sync_fence(req->acq_fen[i]);
	req->acq_fen_cnt = 0;
}

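/*
 * Copy the acquire fence fds from userspace, look up their mdss_fence
 * objects, and create the release fence on the PPP timeline. The release
 * fence is signalled from mdp3_ppp_signal_timeline() once the blit work for
 * this request has completed.
 */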
static int mdp3_ppp_handle_buf_sync(struct blit_req_list *req,
	struct mdp_buf_sync *buf_sync)
{
	int i, fence_cnt = 0, ret = 0;
	int acq_fen_fd[MDP_MAX_FENCE_FD];
	struct mdss_fence *fence;

	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
	    (ppp_stat->timeline == NULL))
		return -EINVAL;

	if (buf_sync->acq_fen_fd_cnt)
		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
				buf_sync->acq_fen_fd_cnt * sizeof(int));
	if (ret) {
		pr_err("%s: copy_from_user failed\n", __func__);
		return ret;
	}
	for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
		fence = mdss_get_fd_sync_fence(acq_fen_fd[i]);
		if (fence == NULL) {
			pr_info("%s: null fence! i=%d fd=%d\n", __func__, i,
				acq_fen_fd[i]);
			ret = -EINVAL;
			break;
		}
		req->acq_fen[i] = fence;
	}
	fence_cnt = i;
	if (ret)
		goto buf_sync_err_1;
	req->acq_fen_cnt = fence_cnt;
	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
		mdp3_ppp_wait_for_fence(req);

	MDSS_XLOG(ppp_stat->timeline_value);

	/* create fence */
	req->cur_rel_fence = mdss_get_sync_fence(ppp_stat->timeline,
			"ppp_fence", NULL, ppp_stat->timeline_value++);
	if (req->cur_rel_fence == NULL) {
		req->cur_rel_sync_pt = NULL;
		pr_err("%s: cannot create fence\n", __func__);
		ret = -ENOMEM;
		goto buf_sync_err_2;
	}
	/* create fd */
	return ret;
buf_sync_err_2:
	ppp_stat->timeline_value--;
buf_sync_err_1:
	for (i = 0; i < fence_cnt; i++)
		mdss_put_sync_fence(req->acq_fen[i]);
	req->acq_fen_cnt = 0;
	return ret;
}

void mdp3_ppp_req_push(struct blit_req_queue *req_q, struct blit_req_list *req)
{
	int idx = req_q->push_idx;

	req_q->req[idx] = *req;
	req_q->count++;
	req_q->push_idx = (req_q->push_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
}

struct blit_req_list *mdp3_ppp_next_req(struct blit_req_queue *req_q)
{
	struct blit_req_list *req;

	if (req_q->count == 0)
		return NULL;
	req = &req_q->req[req_q->pop_idx];
	return req;
}

void mdp3_ppp_req_pop(struct blit_req_queue *req_q)
{
	req_q->count--;
	req_q->pop_idx = (req_q->pop_idx + 1) % MDP3_PPP_MAX_LIST_REQ;
}

void mdp3_free_fw_timer_func(unsigned long arg)
{
	mdp3_res->solid_fill_vote_en = false;
	schedule_work(&ppp_stat->free_bw_work);
}

static void mdp3_free_bw_wq_handler(struct work_struct *work)
{
	struct msm_fb_data_type *mfd = ppp_stat->mfd;

	mutex_lock(&ppp_stat->config_ppp_mutex);
	if (ppp_stat->bw_on)
		mdp3_ppp_turnon(mfd, 0);
	mutex_unlock(&ppp_stat->config_ppp_mutex);
}

static bool is_hw_workaround_needed(struct mdp_blit_req req)
{
	bool result = false;
	bool is_bpp_4 = false;
	uint32_t remainder = 0;
	uint32_t bpp = ppp_get_bpp(req.dst.format, ppp_stat->mfd->fb_imgType);

	/* MDP width split workaround */
	remainder = (req.dst_rect.w) % 16;
	is_bpp_4 = (bpp == 4) ? 1 : 0;
	if ((is_bpp_4 && (remainder == 6 || remainder == 14)) &&
	    !(req.flags & MDP_SOLID_FILL))
		result = true;

	/* bg tile fetching HW workaround */
	if (((req.alpha < MDP_ALPHA_NOP) ||
	     (req.transp_mask != MDP_TRANSP_NOP) ||
	     (req.src.format == MDP_ARGB_8888) ||
	     (req.src.format == MDP_BGRA_8888) ||
	     (req.src.format == MDP_RGBA_8888)) &&
	    (req.flags & MDP_ROT_90) && (req.dst_rect.w <= 16))
		result = true;

	return result;
}

static bool is_roi_equal(struct mdp_blit_req req0,
		struct mdp_blit_req req1)
{
	bool result = false;
	struct mdss_panel_info *panel_info = ppp_stat->mfd->panel_info;

	/*
	 * Check req0 and req1 layer destination ROI and return true if
	 * they are equal.
	 */
	if ((req0.dst_rect.x == req1.dst_rect.x) &&
	    (req0.dst_rect.y == req1.dst_rect.y) &&
	    (req0.dst_rect.w == req1.dst_rect.w) &&
	    (req0.dst_rect.h == req1.dst_rect.h))
		result = true;
	/*
	 * Layers are source cropped and the cropped layer width and height
	 * are the same as the panel width and height
	 */
	else if ((req0.dst_rect.w == req1.dst_rect.w) &&
		 (req0.dst_rect.h == req1.dst_rect.h) &&
		 (req0.dst_rect.w == panel_info->xres) &&
		 (req0.dst_rect.h == panel_info->yres))
		result = true;

	return result;
}

static bool is_scaling_needed(struct mdp_blit_req req)
{
	bool result = true;

	/* Return true if the layer needs scaling, else return false */
	if ((req.src_rect.w == req.dst_rect.w) &&
	    (req.src_rect.h == req.dst_rect.h))
		result = false;
	return result;
}

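/*
 * Decide whether request "indx" (BG) and request "indx + 1" (FG) can be
 * collapsed into a single smart-blit pass: either userspace asked for it
 * explicitly with MDP_SMART_BLIT, or the driver enables it for the first
 * two layers when their destination ROIs match and the format, scaling and
 * rotation restrictions checked below hold. In the YUV case the two
 * requests (and their mapped buffers) are swapped so the UI layer becomes
 * the background.
 */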
static bool is_blit_optimization_possible(struct blit_req_list *req, int indx)
{
	int next = indx + 1;
	bool status = false;
	struct mdp3_img_data tmp_data;
	bool dst_roi_equal = false;
	bool hw_workaround_active = false;
	struct mdp_blit_req bg_req;
	struct mdp_blit_req fg_req;

	if (!(mdp3_res->smart_blit_en)) {
		pr_debug("Smart BLIT disabled from sysfs\n");
		return status;
	}
	if (next < req->count) {
		bg_req = req->req_list[indx];
		fg_req = req->req_list[next];
		hw_workaround_active = is_hw_workaround_needed(bg_req);
		dst_roi_equal = is_roi_equal(bg_req, fg_req);
		/*
		 * Check the userspace Smart BLIT flag for the current and next
		 * request. If the blit request at index "n" has the
		 * MDP_SMART_BLIT flag set then it will be used as the BG
		 * layer in smart blit and the request at index "n+1"
		 * will be used as the FG layer.
		 */
		if ((bg_req.flags & MDP_SMART_BLIT) &&
		    (!(fg_req.flags & MDP_SMART_BLIT)) &&
		    (!(hw_workaround_active)))
			status = true;
		/*
		 * Enable SMART blit between request 0(BG) & request 1(FG) when
		 * the destination ROIs of the BG and FG layer are the same,
		 * there is no scaling on the BG layer,
		 * no rotation on the BG layer,
		 * and the BG layer color format is RGB and marked as MDP_IS_FG.
		 */
		else if ((mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) &&
			 (indx == 0) && (dst_roi_equal) &&
			 (bg_req.flags & MDP_IS_FG) &&
			 (!(is_scaling_needed(bg_req))) &&
			 (!(bg_req.flags & (MDP_ROT_90))) &&
			 (check_if_rgb(bg_req.src.format)) &&
			 (!(hw_workaround_active))) {
			status = true;
			req->req_list[indx].flags |= MDP_SMART_BLIT;
			pr_debug("Optimize RGB Blit for Req Indx %d\n", indx);
		}
		/*
		 * Swap the BG and FG layer to enable SMART blit between request
		 * 0(BG) & request 1(FG) when the destination ROIs of the BG and
		 * FG layer are the same, there is no scaling on the FG and BG
		 * layer, no rotation on the FG layer, and the BG layer color
		 * format is YUV.
		 */
		else if ((indx == 0) &&
			 (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) &&
			 (!(fg_req.flags & (MDP_ROT_90))) && (dst_roi_equal) &&
			 (!(check_if_rgb(bg_req.src.format))) &&
			 (!(hw_workaround_active))) {
			/*
			 * Disable SMART blit for the BG(YUV) layer when there is
			 * scaling on the BG layer,
			 * rotation on the BG layer,
			 * or UD flip on the BG layer.
			 */
			if ((is_scaling_needed(bg_req)) && (
			     bg_req.flags & MDP_ROT_90) &&
			    (bg_req.flags & MDP_FLIP_UD)) {
				pr_debug("YUV layer with ROT+UD_FLIP+Scaling Not supported\n");
				return false;
			}
			/*
			 * Swap the blit requests at index 0 and 1. The YUV layer
			 * at index 0 is replaced with the UI layer request present
			 * at index 1. Since the UI layer will be in the background,
			 * set the IS_FG flag on it and clear it from the YUV layer
			 * flags.
			 */
			if (!(is_scaling_needed(req->req_list[next]))) {
				if (bg_req.flags & MDP_IS_FG) {
					req->req_list[indx].flags &=
						~MDP_IS_FG;
					req->req_list[next].flags |= MDP_IS_FG;
				}
				bg_req = req->req_list[next];
				req->req_list[next] = req->req_list[indx];
				req->req_list[indx] = bg_req;

				tmp_data = req->src_data[next];
				req->src_data[next] = req->src_data[indx];
				req->src_data[indx] = tmp_data;

				tmp_data = req->dst_data[next];
				req->dst_data[next] = req->dst_data[indx];
				req->dst_data[indx] = tmp_data;
				status = true;
				req->req_list[indx].flags |= MDP_SMART_BLIT;
				pr_debug("Optimize YUV Blit for Req Indx %d\n",
					indx);
			}
		}
	}
	return status;
}

static void mdp3_ppp_blit_handler(struct kthread_work *work)
{
	struct msm_fb_data_type *mfd = ppp_stat->mfd;
	struct blit_req_list *req;
	int i, rc = 0;
	bool smart_blit = false;
	int smart_blit_fg_index = -1;

	mutex_lock(&ppp_stat->config_ppp_mutex);
	req = mdp3_ppp_next_req(&ppp_stat->req_q);
	if (!req) {
		mutex_unlock(&ppp_stat->config_ppp_mutex);
		return;
	}

	if (!ppp_stat->bw_on) {
		rc = mdp3_ppp_turnon(mfd, 1);
		if (rc < 0) {
			mutex_unlock(&ppp_stat->config_ppp_mutex);
			pr_err("%s: Enable ppp resources failed\n", __func__);
			return;
		}
	}
	while (req) {
		mdp3_ppp_wait_for_fence(req);
		mdp3_calc_ppp_res(mfd, req);
		if (ppp_res.clk_rate != ppp_stat->mdp_clk) {
			ppp_stat->mdp_clk = ppp_res.clk_rate;
			mdp3_clk_set_rate(MDP3_CLK_MDP_SRC,
				ppp_stat->mdp_clk, MDP3_CLIENT_PPP);
		}
		if (ppp_stat->bw_update) {
			rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_PPP,
				ppp_res.next_ab, ppp_res.next_ib);
			if (rc < 0) {
				pr_err("%s: bw set quota failed\n", __func__);
				mutex_unlock(&ppp_stat->config_ppp_mutex);
				return;
			}
			ppp_stat->bw_update = false;
		}
		ATRACE_BEGIN("mdp3_ppp_start");
		for (i = 0; i < req->count; i++) {
			smart_blit = is_blit_optimization_possible(req, i);
			if (smart_blit)
				/*
				 * Blit request index of the FG layer in
				 * smart blit
				 */
				smart_blit_fg_index = i + 1;
			if (!(req->req_list[i].flags & MDP_NO_BLIT)) {
				/* Do the actual blit. */
				if (!rc) {
					rc = mdp3_ppp_start_blit(mfd,
						&(req->req_list[i]),
						&req->src_data[i],
						&req->dst_data[i]);
				}
				/* Unmap blit source buffer */
				if (smart_blit == false) {
					mdp3_put_img(&req->src_data[i],
						MDP3_CLIENT_PPP);
				}
				if (smart_blit_fg_index == i) {
					/* Unmap smart blit BG buffer */
					mdp3_put_img(&req->src_data[i - 1],
						MDP3_CLIENT_PPP);
					smart_blit_fg_index = -1;
				}
				mdp3_put_img(&req->dst_data[i],
					MDP3_CLIENT_PPP);
				smart_blit = false;
			}
		}
		ATRACE_END("mdp3_ppp_start");
		/* Signal to release fence */
		mutex_lock(&ppp_stat->req_mutex);
		mdp3_ppp_signal_timeline(req);
		mdp3_ppp_req_pop(&ppp_stat->req_q);
		req = mdp3_ppp_next_req(&ppp_stat->req_q);
		if (ppp_stat->wait_for_pop)
			complete(&ppp_stat->pop_q_comp);
		mutex_unlock(&ppp_stat->req_mutex);
	}
	mod_timer(&ppp_stat->free_bw_timer, jiffies +
		msecs_to_jiffies(MDP_RELEASE_BW_TIMEOUT));
	mutex_unlock(&ppp_stat->config_ppp_mutex);
}

int mdp3_ppp_parse_req(void __user *p,
	struct mdp_async_blit_req_list *req_list_header,
	int async)
{
	struct blit_req_list *req;
	struct blit_req_queue *req_q = &ppp_stat->req_q;
	struct mdss_fence *fence = NULL;
	int count, rc, idx, i;

	count = req_list_header->count;

	mutex_lock(&ppp_stat->req_mutex);
	while (req_q->count >= MDP3_PPP_MAX_LIST_REQ) {
		ppp_stat->wait_for_pop = true;
		mutex_unlock(&ppp_stat->req_mutex);
		rc = wait_for_completion_timeout(
			&ppp_stat->pop_q_comp, 5 * HZ);
		if (rc == 0) {
			/* This will only occur if there is a serious problem */
			pr_err("%s: timed out waiting to queue the request\n",
				__func__);
			return -EBUSY;
		}
		mutex_lock(&ppp_stat->req_mutex);
		ppp_stat->wait_for_pop = false;
	}
	idx = req_q->push_idx;
	req = &req_q->req[idx];

	if (copy_from_user(&req->req_list, p,
			sizeof(struct mdp_blit_req) * count)) {
		mutex_unlock(&ppp_stat->req_mutex);
		return -EFAULT;
	}

	rc = mdp3_ppp_handle_buf_sync(req, &req_list_header->sync);
	if (rc < 0) {
		pr_err("%s: Failed to create sync point\n", __func__);
		mutex_unlock(&ppp_stat->req_mutex);
		return rc;
	}
	req->count = count;

	/* We need to grab the ion handle while running in the client thread */
	for (i = 0; i < count; i++) {
		rc = mdp3_ppp_get_img(&req->req_list[i].src,
			&req->req_list[i], &req->src_data[i]);
		if (rc < 0 || req->src_data[i].len == 0) {
			pr_err("mdp_ppp: couldn't retrieve src img from mem\n");
			goto parse_err_1;
		}

		rc = mdp3_ppp_get_img(&req->req_list[i].dst,
			&req->req_list[i], &req->dst_data[i]);
		if (rc < 0 || req->dst_data[i].len == 0) {
			mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
			pr_err("mdp_ppp: couldn't retrieve dest img from mem\n");
			goto parse_err_1;
		}
	}

	if (async) {
		req->cur_rel_fen_fd = mdss_get_sync_fence_fd(
					req->cur_rel_fence);
		rc = copy_to_user(req_list_header->sync.rel_fen_fd,
			&req->cur_rel_fen_fd, sizeof(int));
		if (rc) {
			pr_err("%s: copy_to_user failed\n", __func__);
			goto parse_err_2;
		}
	} else {
		fence = req->cur_rel_fence;
		fence_get((struct fence *) fence);
	}

	mdp3_ppp_req_push(req_q, req);
	mutex_unlock(&ppp_stat->req_mutex);
	kthread_queue_work(&ppp_stat->kworker, &ppp_stat->blit_work);
	if (!async) {
		/* wait for the release fence */
		rc = mdss_wait_sync_fence(fence,
			5 * MSEC_PER_SEC);
		if (rc < 0)
			pr_err("%s: sync blit! rc = %x\n", __func__, rc);

		mdss_put_sync_fence(fence);
		fence = NULL;
	}
	return 0;

parse_err_2:
	put_unused_fd(req->cur_rel_fen_fd);
parse_err_1:
	for (i--; i >= 0; i--) {
		mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
		mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP);
	}
	mdp3_ppp_deinit_buf_sync(req);
	mutex_unlock(&ppp_stat->req_mutex);
	return rc;
}

int mdp3_ppp_res_init(struct msm_fb_data_type *mfd)
{
	int rc;
	struct sched_param param = {.sched_priority = 16};
	const char timeline_name[] = "mdp3_ppp";

	ppp_stat = kzalloc(sizeof(struct ppp_status), GFP_KERNEL);
	if (!ppp_stat)
		return -ENOMEM;

	/* Set up the sync_pt timeline for ppp */
	ppp_stat->timeline = mdss_create_timeline(timeline_name);
	if (ppp_stat->timeline == NULL) {
		pr_err("%s: cannot create timeline\n", __func__);
		return -ENOMEM;
	}
	ppp_stat->timeline_value = 1;

	kthread_init_worker(&ppp_stat->kworker);
	kthread_init_work(&ppp_stat->blit_work, mdp3_ppp_blit_handler);
	ppp_stat->blit_thread = kthread_run(kthread_worker_fn,
					&ppp_stat->kworker,
					"mdp3_ppp");

	if (IS_ERR(ppp_stat->blit_thread)) {
		rc = PTR_ERR(ppp_stat->blit_thread);
		pr_err("ERROR: unable to start ppp blit thread, err = %d\n",
			rc);
		ppp_stat->blit_thread = NULL;
		return rc;
	}
	if (sched_setscheduler(ppp_stat->blit_thread, SCHED_FIFO, &param))
		pr_warn("set priority failed for mdp3 blit thread\n");

	INIT_WORK(&ppp_stat->free_bw_work, mdp3_free_bw_wq_handler);
	init_completion(&ppp_stat->pop_q_comp);
	mutex_init(&ppp_stat->req_mutex);
	mutex_init(&ppp_stat->config_ppp_mutex);
	init_timer(&ppp_stat->free_bw_timer);
	ppp_stat->free_bw_timer.function = mdp3_free_fw_timer_func;
	ppp_stat->free_bw_timer.data = 0;
	ppp_stat->mfd = mfd;
	mdp3_ppp_callback_setup();
	return 0;
}