blob: 24ed7f9a1779120e10b728488f5832985985bca9 [file] [log] [blame]
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/dma-buf.h>
17#include <linux/dma-mapping.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/major.h>
21#include <linux/module.h>
22#include <linux/pm_runtime.h>
23#include <linux/uaccess.h>
24#include <linux/delay.h>
25#include <linux/msm_mdp.h>
26#include <linux/memblock.h>
27#include <linux/sort.h>
28#include <linux/sw_sync.h>
29#include <linux/kmemleak.h>
Sachin Bhayare3d3767e2018-01-02 21:10:57 +053030#include <linux/kthread.h>
Sachin Bhayareeeb88892018-01-02 16:36:01 +053031#include <asm/div64.h>
32
33#include <soc/qcom/event_timer.h>
34#include <linux/msm-bus.h>
35#include "mdss.h"
36#include "mdss_debug.h"
37#include "mdss_fb.h"
38#include "mdss_mdp.h"
39#include "mdss_smmu.h"
40#include "mdss_mdp_wfd.h"
41#include "mdss_dsi_clk.h"
42
43#define VSYNC_PERIOD 16
44#define BORDERFILL_NDX 0x0BF000BF
45#define CHECK_BOUNDS(offset, size, max_size) \
46 (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
47
48#define IS_RIGHT_MIXER_OV(flags, dst_x, left_lm_w) \
49 ((flags & MDSS_MDP_RIGHT_MIXER) || (dst_x >= left_lm_w))
50
51#define BUF_POOL_SIZE 32
52
53#define DFPS_DATA_MAX_HFP 8192
54#define DFPS_DATA_MAX_HBP 8192
55#define DFPS_DATA_MAX_HPW 8192
56#define DFPS_DATA_MAX_FPS 0x7fffffff
57#define DFPS_DATA_MAX_CLK_RATE 250000
58
59static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
60static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
61static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
62static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
63static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
64static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd);
65static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
66 int mode, int dest_ctrl);
67static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
68 struct mdp_set_cfg *cfg);
69
70static inline bool is_ov_right_blend(struct mdp_rect *left_blend,
71 struct mdp_rect *right_blend, u32 left_lm_w)
72{
73 return (((left_blend->x + left_blend->w) == right_blend->x) &&
74 ((left_blend->x + left_blend->w) != left_lm_w) &&
75 (left_blend->x != right_blend->x) &&
76 (left_blend->y == right_blend->y) &&
77 (left_blend->h == right_blend->h));
78}
79
80/**
81 * __is_more_decimation_doable() -
82 * @pipe: pointer to pipe data structure
83 *
84 * if per pipe BW exceeds the limit and user
85 * has not requested decimation then return
86 * -E2BIG error back to user else try more
87 * decimation based on following table config.
88 *
89 * ----------------------------------------------------------
90 * error | split mode | src_split | v_deci | action |
91 * ------|------------|-----------|--------|----------------|
92 * | | | 00 | return error |
93 * | | enabled |--------|----------------|
94 * | | | >1 | more decmation |
95 * | yes |-----------|--------|----------------|
96 * | | | 00 | return error |
97 * | | disabled |--------|----------------|
98 * | | | >1 | return error |
99 * E2BIG |------------|-----------|--------|----------------|
100 * | | | 00 | return error |
101 * | | enabled |--------|----------------|
102 * | | | >1 | more decmation |
103 * | no |-----------|--------|----------------|
104 * | | | 00 | return error |
105 * | | disabled |--------|----------------|
106 * | | | >1 | more decmation |
107 * ----------------------------------------------------------
108 */
109static inline bool __is_more_decimation_doable(struct mdss_mdp_pipe *pipe)
110{
111 struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
112 struct msm_fb_data_type *mfd = pipe->mixer_left->ctl->mfd;
113
114 if (!mfd->split_mode && !pipe->vert_deci)
115 return false;
116 else if (mfd->split_mode && (!mdata->has_src_split ||
117 (mdata->has_src_split && !pipe->vert_deci)))
118 return false;
119 else
120 return true;
121}
122
123static struct mdss_mdp_pipe *__overlay_find_pipe(
124 struct msm_fb_data_type *mfd, u32 ndx)
125{
126 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
127 struct mdss_mdp_pipe *tmp, *pipe = NULL;
128
129 mutex_lock(&mdp5_data->list_lock);
130 list_for_each_entry(tmp, &mdp5_data->pipes_used, list) {
131 if (tmp->ndx == ndx) {
132 pipe = tmp;
133 break;
134 }
135 }
136 mutex_unlock(&mdp5_data->list_lock);
137
138 return pipe;
139}
140
141static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
142 struct mdp_overlay *req)
143{
144 struct mdss_mdp_pipe *pipe;
145
146 pipe = __overlay_find_pipe(mfd, req->id);
147 if (!pipe) {
148 pr_err("invalid pipe ndx=%x\n", req->id);
149 return pipe ? PTR_ERR(pipe) : -ENODEV;
150 }
151
152 *req = pipe->req_data;
153
154 return 0;
155}
156
/*
 * Validate the horizontal placement of an overlay request against the
 * layer-mixer configuration.  Depending on src-split support this may
 * also normalize req->dst_rect.x and the MDSS_MDP_RIGHT_MIXER flag for
 * legacy clients.  Returns 0 on success, -EPERM when the required mixer
 * is absent, or -EOVERFLOW when the destination rect exceeds the
 * computed horizontal resolution.
 */
static int mdss_mdp_ov_xres_check(struct msm_fb_data_type *mfd,
	struct mdp_overlay *req)
{
	u32 xres = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w)) {
		if (mdata->has_src_split) {
			xres = left_lm_w;

			if (req->flags & MDSS_MDP_RIGHT_MIXER) {
				pr_warn("invalid use of RIGHT_MIXER flag.\n");
				/*
				 * if chip-set is capable of source split then
				 * all layers which are only on right LM should
				 * have their x offset relative to left LM's
				 * left-top or in other words relative to
				 * panel width.
				 * By modifying dst_x below, we are assuming
				 * that client is running in legacy mode
				 * chipset capable of source split.
				 */
				if (req->dst_rect.x < left_lm_w)
					req->dst_rect.x += left_lm_w;

				req->flags &= ~MDSS_MDP_RIGHT_MIXER;
			}
		} else if (req->dst_rect.x >= left_lm_w) {
			/*
			 * this is a step towards removing a reliance on
			 * MDSS_MDP_RIGHT_MIXER flags. With the new src split
			 * code, some clients of non-src-split chipsets have
			 * stopped sending MDSS_MDP_RIGHT_MIXER flag and
			 * modified their xres relative to full panel
			 * dimensions. In such cases, we need to deduct left
			 * layer mixer width before we program this HW.
			 */
			req->dst_rect.x -= left_lm_w;
			req->flags |= MDSS_MDP_RIGHT_MIXER;
		}

		if (ctl->mixer_right) {
			xres += ctl->mixer_right->width;
		} else {
			pr_err("ov cannot be placed on right mixer\n");
			return -EPERM;
		}
	} else {
		if (ctl->mixer_left) {
			xres = ctl->mixer_left->width;
		} else {
			pr_err("ov cannot be placed on left mixer\n");
			return -EPERM;
		}

		/* with src split an overlay on the left may span both LMs */
		if (mdata->has_src_split && ctl->mixer_right)
			xres += ctl->mixer_right->width;
	}

	/* the (possibly adjusted) destination rect must fit within xres */
	if (CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres)) {
		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
			req->dst_rect.x, req->dst_rect.w, xres);
		return -EOVERFLOW;
	}

	return 0;
}
226
/*
 * Sanity-check an overlay request against panel, format and hardware
 * capabilities: secure-session policy, z-order range, cursor limits,
 * source/destination rectangle bounds, decimation and BWC constraints,
 * up/down-scale ratio limits, deinterlace alignment, and YUV even
 * coordinate/dimension requirements.  Returns 0 when the request is
 * acceptable, otherwise a negative errno.
 */
int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
			       struct mdp_overlay *req,
			       struct mdss_mdp_format_params *fmt)
{
	u32 yres;
	u32 min_src_size, min_dst_size;
	int content_secure;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	yres = mfd->fbi->var.yres;

	/* secure content on a non-secure writeback session is rejected */
	content_secure = (req->flags & MDP_SECURE_OVERLAY_SESSION);
	if (!ctl->is_secure && content_secure &&
			(mfd->panel.type == WRITEBACK_PANEL)) {
		pr_debug("return due to security concerns\n");
		return -EPERM;
	}

	/* minimum source/destination sizes depend on the MDP HW revision */
	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
		min_src_size = fmt->is_yuv ? 2 : 1;
		min_dst_size = 1;
	} else {
		min_src_size = fmt->is_yuv ? 10 : 5;
		min_dst_size = 2;
	}

	if (req->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
		pr_err("zorder %d out of range\n", req->z_order);
		return -ERANGE;
	}

	/*
	 * Cursor overlays are only supported for targets
	 * with dedicated cursors within VP
	 */
	if ((req->pipe_type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
		((req->z_order != HW_CURSOR_STAGE(mdata)) ||
			!mdata->ncursor_pipes ||
			(req->src_rect.w > mdata->max_cursor_size))) {
		pr_err("Incorrect cursor overlay cursor_pipes=%d zorder=%d\n",
			mdata->ncursor_pipes, req->z_order);
		return -EINVAL;
	}

	/* source rect must be at least min size and fit inside the image */
	if (req->src.width > MAX_IMG_WIDTH ||
	    req->src.height > MAX_IMG_HEIGHT ||
	    req->src_rect.w < min_src_size || req->src_rect.h < min_src_size ||
	    CHECK_BOUNDS(req->src_rect.x, req->src_rect.w, req->src.width) ||
	    CHECK_BOUNDS(req->src_rect.y, req->src_rect.h, req->src.height)) {
		pr_err("invalid source image img wh=%dx%d rect=%d,%d,%d,%d\n",
		       req->src.width, req->src.height,
		       req->src_rect.x, req->src_rect.y,
		       req->src_rect.w, req->src_rect.h);
		return -EOVERFLOW;
	}

	if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size) {
		pr_err("invalid destination resolution (%dx%d)",
		       req->dst_rect.w, req->dst_rect.h);
		return -EOVERFLOW;
	}

	if (req->horz_deci || req->vert_deci) {
		if (!mdata->has_decimation) {
			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
			return -EINVAL;
		} else if ((req->horz_deci > MAX_DECIMATION) ||
				(req->vert_deci > MAX_DECIMATION)) {
			pr_err("Invalid decimation factors horz=%d vert=%d\n",
					req->horz_deci, req->vert_deci);
			return -EINVAL;
		} else if (req->flags & MDP_BWC_EN) {
			pr_err("Decimation can't be enabled with BWC\n");
			return -EINVAL;
		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
			pr_err("Decimation can't be enabled with MacroTile format\n");
			return -EINVAL;
		}
	}

	if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
		u32 src_w, src_h, dst_w, dst_h;

		if (CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres)) {
			pr_err("invalid vertical destination: y=%d, h=%d\n",
				req->dst_rect.y, req->dst_rect.h);
			return -EOVERFLOW;
		}

		/* 90-degree rotation swaps width/height for scale checks */
		if (req->flags & MDP_ROT_90) {
			dst_h = req->dst_rect.w;
			dst_w = req->dst_rect.h;
		} else {
			dst_w = req->dst_rect.w;
			dst_h = req->dst_rect.h;
		}

		/* scale ratios are evaluated on the decimated source size */
		src_w = DECIMATED_DIMENSION(req->src_rect.w, req->horz_deci);
		src_h = DECIMATED_DIMENSION(req->src_rect.h, req->vert_deci);

		if (src_w > mdata->max_pipe_width) {
			pr_err("invalid source width=%d HDec=%d\n",
					req->src_rect.w, req->horz_deci);
			return -EINVAL;
		}

		if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
			pr_err("too much upscaling Width %d->%d\n",
			       req->src_rect.w, req->dst_rect.w);
			return -EINVAL;
		}

		if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
			pr_err("too much upscaling. Height %d->%d\n",
			       req->src_rect.h, req->dst_rect.h);
			return -EINVAL;
		}

		if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
			pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
			       src_w, req->dst_rect.w, req->horz_deci);
			return -EINVAL;
		}

		if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
			pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
			       src_h, req->dst_rect.h, req->vert_deci);
			return -EINVAL;
		}

		if (req->flags & MDP_BWC_EN) {
			/* BWC requires fetching the full source frame */
			if ((req->src.width != req->src_rect.w) ||
			    (req->src.height != req->src_rect.h)) {
				pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
					req->src.width, req->src.height,
					req->src_rect.w, req->src_rect.h);
				return -EINVAL;
			}

			if ((req->flags & MDP_DECIMATION_EN) ||
					req->vert_deci || req->horz_deci) {
				pr_err("Can't enable BWC and decimation\n");
				return -EINVAL;
			}
		}

		/* deinterlacing needs the fetched dimension divisible by 4 */
		if ((req->flags & MDP_DEINTERLACE) &&
					!req->scale.enable_pxl_ext) {
			if (req->flags & MDP_SOURCE_ROTATED_90) {
				if ((req->src_rect.w % 4) != 0) {
					pr_err("interlaced rect not h/4\n");
					return -EINVAL;
				}
			} else if ((req->src_rect.h % 4) != 0) {
				pr_err("interlaced rect not h/4\n");
				return -EINVAL;
			}
		}
	} else {
		if (req->flags & MDP_DEINTERLACE) {
			if ((req->src_rect.h % 4) != 0) {
				pr_err("interlaced rect h not multiple of 4\n");
				return -EINVAL;
			}
		}
	}

	if (fmt->is_yuv) {
		/* YUV sources need even coordinates and dimensions */
		if ((req->src_rect.x & 0x1) || (req->src_rect.y & 0x1) ||
		    (req->src_rect.w & 0x1) || (req->src_rect.h & 0x1)) {
			pr_err("invalid odd src resolution or coordinates\n");
			return -EINVAL;
		}
	}

	return 0;
}
404
405int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
406 u32 flags)
407{
408 struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
409 struct mdss_mdp_perf_params perf;
410 int rc;
411
412 memset(&perf, 0, sizeof(perf));
413
414 flags |= PERF_CALC_PIPE_APPLY_CLK_FUDGE |
415 PERF_CALC_PIPE_CALC_SMP_SIZE;
416
417 for (;;) {
418 rc = mdss_mdp_perf_calc_pipe(pipe, &perf, NULL,
419 flags);
420
421 if (!rc && (perf.mdp_clk_rate <= mdata->max_mdp_clk_rate)) {
422 rc = mdss_mdp_perf_bw_check_pipe(&perf, pipe);
423 if (!rc) {
424 break;
425 } else if (rc == -E2BIG &&
426 !__is_more_decimation_doable(pipe)) {
427 pr_debug("pipe%d exceeded per pipe BW\n",
428 pipe->num);
429 return rc;
430 }
431 }
432
433 /*
434 * if decimation is available try to reduce minimum clock rate
435 * requirement by applying vertical decimation and reduce
436 * mdp clock requirement
437 */
438 if (mdata->has_decimation && (pipe->vert_deci < MAX_DECIMATION)
439 && !pipe->bwc_mode && !pipe->scaler.enable &&
440 mdss_mdp_is_linear_format(pipe->src_fmt))
441 pipe->vert_deci++;
442 else
443 return -E2BIG;
444 }
445
446 return 0;
447}
448
/*
 * Cross-check the user-supplied pixel-extension data against the pipe's
 * source geometry.  For each plane, the requested pixels (ROI plus
 * left/right or top/bottom extensions) must equal what the hardware
 * will fetch, and the overfetch must not exceed the source image size.
 * On mismatch the scaler is disabled and -EINVAL is returned.
 */
static int __mdss_mdp_validate_pxl_extn(struct mdss_mdp_pipe *pipe)
{
	int plane;

	for (plane = 0; plane < MAX_PLANES; plane++) {
		u32 hor_req_pixels, hor_fetch_pixels;
		u32 hor_ov_fetch, vert_ov_fetch;
		u32 vert_req_pixels, vert_fetch_pixels;
		u32 src_w = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
		u32 src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);

		/*
		 * plane 1 and 2 are for chroma and are same. While configuring
		 * HW, programming only one of the chroma components is
		 * sufficient.
		 */
		if (plane == 2)
			continue;

		/*
		 * For chroma plane, width is half for the following sub sampled
		 * formats. Except in case of decimation, where hardware avoids
		 * 1 line of decimation instead of downsampling.
		 */
		if (plane == 1 && !pipe->horz_deci &&
		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1))) {
			src_w >>= 1;
		}

		if (plane == 1 && !pipe->vert_deci &&
		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)))
			src_h >>= 1;

		/* pixels requested by the user-provided extension data */
		hor_req_pixels = pipe->scaler.roi_w[plane] +
			pipe->scaler.num_ext_pxls_left[plane] +
			pipe->scaler.num_ext_pxls_right[plane];

		/* pixels the HW will fetch/repeat (fetch scaled by deci) */
		hor_fetch_pixels = src_w +
			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci) +
			pipe->scaler.left_rpt[plane] +
			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci) +
			pipe->scaler.right_rpt[plane];

		hor_ov_fetch = src_w +
			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci)+
			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci);

		vert_req_pixels = pipe->scaler.num_ext_pxls_top[plane] +
			pipe->scaler.num_ext_pxls_btm[plane];

		vert_fetch_pixels =
			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci) +
			pipe->scaler.top_rpt[plane] +
			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci)+
			pipe->scaler.btm_rpt[plane];

		vert_ov_fetch = src_h +
			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci)+
			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci);

		if ((hor_req_pixels != hor_fetch_pixels) ||
			(hor_ov_fetch > pipe->img_width) ||
			(vert_req_pixels != vert_fetch_pixels) ||
			(vert_ov_fetch > pipe->img_height)) {
			pr_err("err: plane=%d h_req:%d h_fetch:%d v_req:%d v_fetch:%d\n",
					plane,
					hor_req_pixels, hor_fetch_pixels,
					vert_req_pixels, vert_fetch_pixels);
			pr_err("roi_w[%d]=%d, src_img:[%d, %d]\n",
					plane, pipe->scaler.roi_w[plane],
					pipe->img_width, pipe->img_height);
			/* invalid extension data: force scaler off */
			pipe->scaler.enable = 0;
			return -EINVAL;
		}
	}

	return 0;
}
529
/*
 * Set up scaling for a pipe.  If the user enabled the scaler, only
 * validate the supplied pixel-extension data (pre-QSEED3 targets);
 * otherwise compute horizontal and vertical phase steps and derive the
 * QSEED3 config or pixel extensions.  Returns 0 on success, -ECANCELED
 * when the caller should fall back to GPU composition, or a negative
 * errno on calculation failure.
 */
int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
{
	u32 src;
	int rc = 0;
	struct mdss_data_type *mdata;

	mdata = mdss_mdp_get_mdata();
	if (pipe->scaler.enable) {
		/* user supplied scaler data; only validate it (non-QSEED3) */
		if (!test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
			rc = __mdss_mdp_validate_pxl_extn(pipe);
		return rc;
	}

	/* driver-computed scaling: start from a clean scaler state */
	memset(&pipe->scaler, 0, sizeof(struct mdp_scale_data_v2));
	src = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
	rc = mdss_mdp_calc_phase_step(src, pipe->dst.w,
			&pipe->scaler.phase_step_x[0]);
	if (rc == -EOVERFLOW) {
		/* overflow on horizontal direction is acceptable */
		rc = 0;
	} else if (rc) {
		pr_err("Horizontal scaling calculation failed=%d! %d->%d\n",
				rc, src, pipe->dst.w);
		return rc;
	}

	src = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
	rc = mdss_mdp_calc_phase_step(src, pipe->dst.h,
			&pipe->scaler.phase_step_y[0]);

	if ((rc == -EOVERFLOW) && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)) {
		/* overflow on Qseed2 scaler is acceptable */
		rc = 0;
	} else if (rc == -EOVERFLOW) {
		/* overflow expected and should fallback to GPU */
		rc = -ECANCELED;
	} else if (rc) {
		pr_err("Vertical scaling calculation failed=%d! %d->%d\n",
				rc, src, pipe->dst.h);
	}

	/* program whichever scaler generation this target has */
	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
		mdss_mdp_pipe_calc_qseed3_cfg(pipe);
	else
		mdss_mdp_pipe_calc_pixel_extn(pipe);

	return rc;
}
578
579inline void mdss_mdp_overlay_set_chroma_sample(
580 struct mdss_mdp_pipe *pipe)
581{
582 pipe->chroma_sample_v = pipe->chroma_sample_h = 0;
583
584 switch (pipe->src_fmt->chroma_sample) {
585 case MDSS_MDP_CHROMA_H1V2:
586 pipe->chroma_sample_v = 1;
587 break;
588 case MDSS_MDP_CHROMA_H2V1:
589 pipe->chroma_sample_h = 1;
590 break;
591 case MDSS_MDP_CHROMA_420:
592 pipe->chroma_sample_v = 1;
593 pipe->chroma_sample_h = 1;
594 break;
595 }
596 if (pipe->horz_deci)
597 pipe->chroma_sample_h = 0;
598 if (pipe->vert_deci)
599 pipe->chroma_sample_v = 0;
600}
601
/*
 * Validate an overlay request, allocate or look up the SSPP pipe that
 * will serve it, and program the pipe's software state (rects, blend,
 * scaling, decimation, SMP reservation).
 *
 * @mfd:             framebuffer device the overlay belongs to
 * @req:             overlay request; updated in place (id, flags,
 *                   dst_rect, vert_deci, priority)
 * @ppipe:           on success, receives the configured pipe
 * @left_blend_pipe: pipe this one right-blends with, or NULL
 * @is_single_layer: true when this is the only layer (perf hint)
 *
 * Returns 0 on success or a negative errno.  On failure, a never-played
 * pipe is destroyed and all pipes of this fb are marked dirty with their
 * SMP reservations released (see exit_fail).
 */
int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
	struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe,
	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer)
{
	struct mdss_mdp_format_params *fmt;
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_mixer *mixer = NULL;
	u32 pipe_type, mixer_mux;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	int ret;
	u32 bwc_enabled;
	u32 rot90;
	bool is_vig_needed = false;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	u32 flags = 0;

	if (mdp5_data->ctl == NULL)
		return -ENODEV;

	if (req->flags & MDP_ROT_90) {
		pr_err("unsupported inline rotation\n");
		return -EOPNOTSUPP;
	}

	if ((req->dst_rect.w > mdata->max_mixer_width) ||
		(req->dst_rect.h > MAX_DST_H)) {
		pr_err("exceeded max mixer supported resolution %dx%d\n",
				req->dst_rect.w, req->dst_rect.h);
		return -EOVERFLOW;
	}

	/* choose the layer mixer based on flags and destination x */
	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w))
		mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
	else
		mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;

	pr_debug("ctl=%u req id=%x mux=%d z_order=%d flags=0x%x dst_x:%d\n",
		mdp5_data->ctl->num, req->id, mixer_mux, req->z_order,
		req->flags, req->dst_rect.x);

	fmt = mdss_mdp_get_format_params(req->src.format);
	if (!fmt) {
		pr_err("invalid pipe format %d\n", req->src.format);
		return -EINVAL;
	}

	bwc_enabled = req->flags & MDP_BWC_EN;
	rot90 = req->flags & MDP_SOURCE_ROTATED_90;

	/*
	 * Always set yuv rotator output to pseudo planar.
	 */
	if (bwc_enabled || rot90) {
		req->src.format =
			mdss_mdp_get_rotator_dst_format(req->src.format, rot90,
				bwc_enabled);
		fmt = mdss_mdp_get_format_params(req->src.format);
		if (!fmt) {
			pr_err("invalid pipe format %d\n", req->src.format);
			return -EINVAL;
		}
	}

	ret = mdss_mdp_ov_xres_check(mfd, req);
	if (ret)
		return ret;

	ret = mdss_mdp_overlay_req_check(mfd, req, fmt);
	if (ret)
		return ret;

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
	if (!mixer) {
		pr_err("unable to get mixer\n");
		return -ENODEV;
	}

	/* targets without scalar RGB pipes need a VIG pipe for any scaling */
	if ((mdata->has_non_scalar_rgb) &&
		((req->src_rect.w != req->dst_rect.w) ||
			(req->src_rect.h != req->dst_rect.h)))
		is_vig_needed = true;

	if (req->id == MSMFB_NEW_REQUEST) {
		/* map the requested pipe type, falling back for AUTO */
		switch (req->pipe_type) {
		case PIPE_TYPE_VIG:
			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
			break;
		case PIPE_TYPE_RGB:
			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
			break;
		case PIPE_TYPE_DMA:
			pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
			break;
		case PIPE_TYPE_CURSOR:
			pipe_type = MDSS_MDP_PIPE_TYPE_CURSOR;
			break;
		case PIPE_TYPE_AUTO:
		default:
			if (req->flags & MDP_OV_PIPE_FORCE_DMA)
				pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
			else if (fmt->is_yuv ||
				(req->flags & MDP_OV_PIPE_SHARE) ||
				is_vig_needed)
				pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
			else
				pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
			break;
		}

		pipe = mdss_mdp_pipe_alloc(mixer, pipe_type, left_blend_pipe);

		/* RGB pipes can be used instead of DMA */
		if (IS_ERR_OR_NULL(pipe) &&
			(req->pipe_type == PIPE_TYPE_AUTO) &&
			(pipe_type == MDSS_MDP_PIPE_TYPE_DMA)) {
			pr_debug("giving RGB pipe for fb%d. flags:0x%x\n",
				mfd->index, req->flags);
			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
				left_blend_pipe);
		}

		/* VIG pipes can also support RGB format */
		if (IS_ERR_OR_NULL(pipe) &&
			(req->pipe_type == PIPE_TYPE_AUTO) &&
			(pipe_type == MDSS_MDP_PIPE_TYPE_RGB)) {
			pr_debug("giving ViG pipe for fb%d. flags:0x%x\n",
				mfd->index, req->flags);
			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
				left_blend_pipe);
		}

		if (IS_ERR(pipe)) {
			return PTR_ERR(pipe);
		} else if (!pipe) {
			pr_err("error allocating pipe. flags=0x%x req->pipe_type=%d pipe_type=%d\n",
				req->flags, req->pipe_type, pipe_type);
			return -ENODEV;
		}

		ret = mdss_mdp_pipe_map(pipe);
		if (ret) {
			pr_err("unable to map pipe=%d\n", pipe->num);
			return ret;
		}

		mutex_lock(&mdp5_data->list_lock);
		list_add(&pipe->list, &mdp5_data->pipes_used);
		mutex_unlock(&mdp5_data->list_lock);
		pipe->mixer_left = mixer;
		pipe->mfd = mfd;
		pipe->play_cnt = 0;
	} else {
		/* reuse of an existing overlay: look up the pipe by ndx */
		pipe = __overlay_find_pipe(mfd, req->id);
		if (!pipe) {
			pr_err("invalid pipe ndx=%x\n", req->id);
			return -ENODEV;
		}

		ret = mdss_mdp_pipe_map(pipe);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_err("Unable to map used pipe%d ndx=%x\n",
				pipe->num, pipe->ndx);
			return ret;
		}

		if (is_vig_needed && (pipe->type != MDSS_MDP_PIPE_TYPE_VIG)) {
			pr_err("pipe is non-scalar ndx=%x\n", req->id);
			ret = -EINVAL;
			goto exit_fail;
		}

		/* only allow mixer switches within the same fb's ctl */
		if ((pipe->mixer_left != mixer) &&
			(pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR)) {
			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
				pr_err("Can't switch mixer %d->%d pnum %d!\n",
					pipe->mixer_left->num, mixer->num,
					pipe->num);
				ret = -EINVAL;
				goto exit_fail;
			}
			pr_debug("switching pipe%d mixer %d->%d stage%d\n",
				pipe->num,
				pipe->mixer_left ? pipe->mixer_left->num : -1,
				mixer->num, req->z_order);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			pipe->mixer_left = mixer;
		}
	}

	if (left_blend_pipe) {
		/* right-blend pipe must have higher HW priority than left */
		if (pipe->priority <= left_blend_pipe->priority) {
			pr_err("priority limitation. left:%d right%d\n",
				left_blend_pipe->priority, pipe->priority);
			ret = -EBADSLT;
			goto exit_fail;
		} else {
			pr_debug("pipe%d is a right_pipe\n", pipe->num);
			pipe->is_right_blend = true;
		}
	} else if (pipe->is_right_blend) {
		/*
		 * pipe used to be right blend need to update mixer
		 * configuration to remove it as a right blend
		 */
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
		pipe->is_right_blend = false;
	}

	/* apply panel orientation (flip) to the requested flags */
	if (mfd->panel_orientation)
		req->flags ^= mfd->panel_orientation;

	req->priority = pipe->priority;
	/* identical request on a clean pipe: nothing to reprogram */
	if (!pipe->dirty && !memcmp(req, &pipe->req_data, sizeof(*req))) {
		pr_debug("skipping pipe_reconfiguration\n");
		goto skip_reconfigure;
	}

	pipe->flags = req->flags;
	if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
		pr_err("BWC is not supported in MDP version %x\n",
			mdp5_data->mdata->mdp_rev);
		pipe->bwc_mode = 0;
	} else {
		/* BWC cannot be used while the mixer is in rotator mode */
		pipe->bwc_mode = pipe->mixer_left->rotator_mode ?
			0 : (bwc_enabled ? 1 : 0);
	}
	pipe->img_width = req->src.width & 0x3fff;
	pipe->img_height = req->src.height & 0x3fff;
	pipe->src.x = req->src_rect.x;
	pipe->src.y = req->src_rect.y;
	pipe->src.w = req->src_rect.w;
	pipe->src.h = req->src_rect.h;
	pipe->dst.x = req->dst_rect.x;
	pipe->dst.y = req->dst_rect.y;
	pipe->dst.w = req->dst_rect.w;
	pipe->dst.h = req->dst_rect.h;

	/* shift destination by any configured border */
	if (mixer->ctl) {
		pipe->dst.x += mixer->ctl->border_x_off;
		pipe->dst.y += mixer->ctl->border_y_off;
	}

	/* mirror the destination rect for flipped panel orientations */
	if (mfd->panel_orientation & MDP_FLIP_LR)
		pipe->dst.x = pipe->mixer_left->width
			- pipe->dst.x - pipe->dst.w;
	if (mfd->panel_orientation & MDP_FLIP_UD)
		pipe->dst.y = pipe->mixer_left->height
			- pipe->dst.y - pipe->dst.h;

	pipe->horz_deci = req->horz_deci;
	pipe->vert_deci = req->vert_deci;

	/*
	 * check if overlay span across two mixers and if source split is
	 * available. If yes, enable src_split_req flag so that during mixer
	 * staging, same pipe will be stagged on both layer mixers.
	 */
	if (mdata->has_src_split) {
		if ((pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
				is_split_lm(mfd)) {
			pipe->src_split_req = true;
		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
		    ((req->dst_rect.x + req->dst_rect.w) > mixer->width)) {
			if (req->dst_rect.x >= mixer->width) {
				pr_err("%pS: err dst_x can't lie in right half",
					__builtin_return_address(0));
				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
					req->flags, req->dst_rect.x,
					req->dst_rect.w, mixer->width);
				ret = -EINVAL;
				goto exit_fail;
			} else {
				pipe->src_split_req = true;
			}
		} else {
			if (pipe->src_split_req) {
				mdss_mdp_mixer_pipe_unstage(pipe,
					pipe->mixer_right);
				pipe->mixer_right = NULL;
			}
			pipe->src_split_req = false;
		}
	}

	memcpy(&pipe->scaler, &req->scale, sizeof(struct mdp_scale_data));
	pipe->src_fmt = fmt;
	mdss_mdp_overlay_set_chroma_sample(pipe);

	pipe->mixer_stage = req->z_order;
	pipe->is_fg = req->is_fg;
	pipe->alpha = req->alpha;
	pipe->transp = req->transp_mask;
	pipe->blend_op = req->blend_op;
	/* pick a default blend op based on the format's alpha support */
	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
		pipe->blend_op = fmt->alpha_enable ?
			BLEND_OP_PREMULTIPLIED :
			BLEND_OP_OPAQUE;

	if (!fmt->alpha_enable && (pipe->blend_op != BLEND_OP_OPAQUE))
		pr_debug("Unintended blend_op %d on layer with no alpha plane\n",
			pipe->blend_op);

	if (fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
			!pipe->scaler.enable) {
		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;

		if (!(pipe->flags & MDSS_MDP_DUAL_PIPE) ||
		    IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w))
			pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
	} else {
		pipe->overfetch_disable = 0;
	}
	pipe->bg_color = req->bg_color;

	/* cursor pipes take no pp/scaling config below */
	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
		goto cursor_done;

	mdss_mdp_pipe_pp_clear(pipe);
	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
		memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
			sizeof(struct mdp_overlay_pp_params));
		ret = mdss_mdp_pp_sspp_config(pipe);
		if (ret) {
			pr_err("failed to configure pp params ret %d\n", ret);
			goto exit_fail;
		}
	}

	/*
	 * Populate Color Space.
	 */
	if (pipe->src_fmt->is_yuv && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG))
		pipe->csc_coeff_set = req->color_space;
	/*
	 * When scaling is enabled src crop and image
	 * width and height is modified by user
	 */
	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
			pipe->src.x &= ~1;
			pipe->src.w /= 2;
			pipe->img_width /= 2;
		} else {
			pipe->src.h /= 2;
			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
			pipe->src.y &= ~1;
		}
	}

	if (is_single_layer)
		flags |= PERF_CALC_PIPE_SINGLE_LAYER;

	ret = mdp_pipe_tune_perf(pipe, flags);
	if (ret) {
		pr_debug("unable to satisfy performance. ret=%d\n", ret);
		goto exit_fail;
	}

	ret = mdss_mdp_overlay_setup_scaling(pipe);
	if (ret)
		goto exit_fail;

	/* shared WFD mode: release SMPs before re-reserving */
	if ((mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
		(mdp5_data->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
		mdss_mdp_smp_release(pipe);

	ret = mdss_mdp_smp_reserve(pipe);
	if (ret) {
		pr_debug("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
			pipe->num, ret);
		goto exit_fail;
	}


	req->id = pipe->ndx;

cursor_done:
	req->vert_deci = pipe->vert_deci;

	pipe->req_data = *req;
	pipe->dirty = false;

	pipe->params_changed++;
skip_reconfigure:
	*ppipe = pipe;

	mdss_mdp_pipe_unmap(pipe);

	return ret;
exit_fail:
	mdss_mdp_pipe_unmap(pipe);

	mutex_lock(&mdp5_data->list_lock);
	/* destroy the pipe only if it has never been played */
	if (pipe->play_cnt == 0) {
		pr_debug("failed for pipe %d\n", pipe->num);
		if (!list_empty(&pipe->list))
			list_del_init(&pipe->list);
		mdss_mdp_pipe_destroy(pipe);
	}

	/* invalidate any overlays in this framebuffer after failure */
	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
		pr_debug("freeing allocations for pipe %d\n", pipe->num);
		mdss_mdp_smp_unreserve(pipe);
		pipe->params_changed = 0;
		pipe->dirty = true;
	}
	mutex_unlock(&mdp5_data->list_lock);
	return ret;
}
1018
1019static int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
1020 struct mdp_overlay *req)
1021{
1022 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1023 int ret;
1024
1025 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
1026 if (ret)
1027 return ret;
1028
1029 if (mdss_fb_is_power_off(mfd)) {
1030 mutex_unlock(&mdp5_data->ov_lock);
1031 return -EPERM;
1032 }
1033
1034 if (req->src.format == MDP_RGB_BORDERFILL) {
1035 req->id = BORDERFILL_NDX;
1036 } else {
1037 struct mdss_mdp_pipe *pipe;
1038
1039 /* userspace zorder start with stage 0 */
1040 req->z_order += MDSS_MDP_STAGE_0;
1041
1042 ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
1043
1044 req->z_order -= MDSS_MDP_STAGE_0;
1045 }
1046
1047 mutex_unlock(&mdp5_data->ov_lock);
1048
1049 return ret;
1050}
1051
1052/*
1053 * it's caller responsibility to acquire mdp5_data->list_lock while calling
1054 * this function
1055 */
1056struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
1057 struct mdss_mdp_pipe *pipe)
1058{
1059 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1060 struct mdss_mdp_data *buf;
1061 int i;
1062
1063 if (list_empty(&mdp5_data->bufs_pool)) {
1064 pr_debug("allocating %u bufs for fb%d\n",
1065 BUF_POOL_SIZE, mfd->index);
1066
1067 buf = kcalloc(BUF_POOL_SIZE, sizeof(*buf), GFP_KERNEL);
1068 if (!buf)
1069 return NULL;
1070
1071 list_add(&buf->chunk_list, &mdp5_data->bufs_chunks);
1072 kmemleak_not_leak(buf);
1073
1074 for (i = 0; i < BUF_POOL_SIZE; i++) {
1075 buf->state = MDP_BUF_STATE_UNUSED;
1076 list_add(&buf[i].buf_list, &mdp5_data->bufs_pool);
1077 }
1078 }
1079
1080 buf = list_first_entry(&mdp5_data->bufs_pool,
1081 struct mdss_mdp_data, buf_list);
1082 WARN_ON(buf->state != MDP_BUF_STATE_UNUSED);
1083 buf->state = MDP_BUF_STATE_READY;
1084 buf->last_alloc = local_clock();
1085 buf->last_pipe = pipe;
1086
1087 list_move_tail(&buf->buf_list, &mdp5_data->bufs_used);
1088 list_add_tail(&buf->pipe_list, &pipe->buf_queue);
1089
1090 pr_debug("buffer alloc: %pK\n", buf);
1091
1092 return buf;
1093}
1094
1095static
1096struct mdss_mdp_data *__mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
1097 struct mdss_mdp_pipe *pipe)
1098{
1099 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1100 struct mdss_mdp_data *buf;
1101
1102 mutex_lock(&mdp5_data->list_lock);
1103 buf = mdss_mdp_overlay_buf_alloc(mfd, pipe);
1104 mutex_unlock(&mdp5_data->list_lock);
1105
1106 return buf;
1107}
1108
1109static void mdss_mdp_overlay_buf_deinit(struct msm_fb_data_type *mfd)
1110{
1111 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1112 struct mdss_mdp_data *buf, *t;
1113
1114 pr_debug("performing cleanup of buffers pool on fb%d\n", mfd->index);
1115
1116 WARN_ON(!list_empty(&mdp5_data->bufs_used));
1117
1118 list_for_each_entry_safe(buf, t, &mdp5_data->bufs_pool, buf_list)
1119 list_del(&buf->buf_list);
1120
1121 list_for_each_entry_safe(buf, t, &mdp5_data->bufs_chunks, chunk_list) {
1122 list_del(&buf->chunk_list);
1123 kfree(buf);
1124 }
1125}
1126
1127/*
1128 * it's caller responsibility to acquire mdp5_data->list_lock while calling
1129 * this function
1130 */
1131void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
1132 struct mdss_mdp_data *buf)
1133{
1134 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1135
1136 if (!list_empty(&buf->pipe_list))
1137 list_del_init(&buf->pipe_list);
1138
1139 mdss_mdp_data_free(buf, false, DMA_TO_DEVICE);
1140
1141 buf->last_freed = local_clock();
1142 buf->state = MDP_BUF_STATE_UNUSED;
1143
1144 pr_debug("buffer freed: %pK\n", buf);
1145
1146 list_move_tail(&buf->buf_list, &mdp5_data->bufs_pool);
1147}
1148
1149static void __mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
1150 struct mdss_mdp_data *buf)
1151{
1152 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1153
1154 mutex_lock(&mdp5_data->list_lock);
1155 mdss_mdp_overlay_buf_free(mfd, buf);
1156 mutex_unlock(&mdp5_data->list_lock);
1157}
1158
1159static inline void __pipe_buf_mark_cleanup(struct msm_fb_data_type *mfd,
1160 struct mdss_mdp_data *buf)
1161{
1162 /* buffer still in bufs_used, marking it as cleanup will clean it up */
1163 buf->state = MDP_BUF_STATE_CLEANUP;
1164 list_del_init(&buf->pipe_list);
1165}
1166
1167/**
1168 * __mdss_mdp_overlay_free_list_purge() - clear free list of buffers
1169 * @mfd: Msm frame buffer data structure for the associated fb
1170 *
1171 * Frees memory and clears current list of buffers which are pending free
1172 */
1173static void __mdss_mdp_overlay_free_list_purge(struct msm_fb_data_type *mfd)
1174{
1175 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1176 struct mdss_mdp_data *buf, *t;
1177
1178 pr_debug("purging fb%d free list\n", mfd->index);
1179
1180 list_for_each_entry_safe(buf, t, &mdp5_data->bufs_freelist, buf_list)
1181 mdss_mdp_overlay_buf_free(mfd, buf);
1182}
1183
1184static void __overlay_pipe_cleanup(struct msm_fb_data_type *mfd,
1185 struct mdss_mdp_pipe *pipe)
1186{
1187 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1188 struct mdss_mdp_data *buf, *tmpbuf;
1189
1190 list_for_each_entry_safe(buf, tmpbuf, &pipe->buf_queue, pipe_list) {
1191 __pipe_buf_mark_cleanup(mfd, buf);
1192 list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
1193
1194 /*
1195 * in case of secure UI, the buffer needs to be released as
1196 * soon as session is closed.
1197 */
1198 if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
1199 mdss_mdp_overlay_buf_free(mfd, buf);
1200 }
1201
1202 mdss_mdp_pipe_destroy(pipe);
1203}
1204
1205/**
1206 * mdss_mdp_overlay_cleanup() - handles cleanup after frame commit
1207 * @mfd: Msm frame buffer data structure for the associated fb
1208 * @destroy_pipes: list of pipes that should be destroyed as part of cleanup
1209 *
1210 * Goes through destroy_pipes list and ensures they are ready to be destroyed
1211 * and cleaned up. Also cleanup of any pipe buffers after flip.
1212 */
1213static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd,
1214 struct list_head *destroy_pipes)
1215{
1216 struct mdss_mdp_pipe *pipe, *tmp;
1217 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1218 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
1219 bool recovery_mode = false;
1220 bool skip_fetch_halt, pair_found;
1221 struct mdss_mdp_data *buf, *tmpbuf;
1222
1223 mutex_lock(&mdp5_data->list_lock);
1224 list_for_each_entry(pipe, destroy_pipes, list) {
1225 pair_found = false;
1226 skip_fetch_halt = false;
1227 tmp = pipe;
1228
1229 /*
1230 * Find if second rect is in the destroy list from the current
1231 * position. So if both rects are part of the destroy list then
1232 * fetch halt will be skipped for the 1st rect.
1233 */
1234 list_for_each_entry_from(tmp, destroy_pipes, list) {
1235 if (tmp->num == pipe->num) {
1236 pair_found = true;
1237 break;
1238 }
1239 }
1240
1241 /* skip fetch halt if pipe's other rect is still in use */
1242 if (!pair_found) {
1243 tmp = (struct mdss_mdp_pipe *)pipe->multirect.next;
1244 if (tmp)
1245 skip_fetch_halt =
1246 atomic_read(&tmp->kref.refcount);
1247 }
1248
1249 /* make sure pipe fetch has been halted before freeing buffer */
1250 if (!skip_fetch_halt && mdss_mdp_pipe_fetch_halt(pipe, false)) {
1251 /*
1252 * if pipe is not able to halt. Enter recovery mode,
1253 * by un-staging any pipes that are attached to mixer
1254 * so that any freed pipes that are not able to halt
1255 * can be staged in solid fill mode and be reset
1256 * with next vsync
1257 */
1258 if (!recovery_mode) {
1259 recovery_mode = true;
1260 mdss_mdp_mixer_unstage_all(ctl->mixer_left);
1261 mdss_mdp_mixer_unstage_all(ctl->mixer_right);
1262 }
1263 pipe->params_changed++;
1264 pipe->unhalted = true;
1265 mdss_mdp_pipe_queue_data(pipe, NULL);
1266 }
1267 }
1268
1269 if (recovery_mode) {
1270 pr_warn("performing recovery sequence for fb%d\n", mfd->index);
1271 __overlay_kickoff_requeue(mfd);
1272 }
1273
1274 __mdss_mdp_overlay_free_list_purge(mfd);
1275
1276 list_for_each_entry_safe(buf, tmpbuf, &mdp5_data->bufs_used, buf_list) {
1277 if (buf->state == MDP_BUF_STATE_CLEANUP)
1278 list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
1279 }
1280
1281 list_for_each_entry_safe(pipe, tmp, destroy_pipes, list) {
1282 list_del_init(&pipe->list);
1283 if (recovery_mode) {
1284 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
1285 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
1286 pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
1287 }
1288 __overlay_pipe_cleanup(mfd, pipe);
1289
1290 if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
1291 /*
1292 * track only RECT0, since at any given point there
1293 * can only be RECT0 only or RECT0 + RECT1
1294 */
1295 ctl->mixer_left->next_pipe_map &= ~pipe->ndx;
1296 if (ctl->mixer_right)
1297 ctl->mixer_right->next_pipe_map &= ~pipe->ndx;
1298 }
1299 }
1300 mutex_unlock(&mdp5_data->list_lock);
1301}
1302
1303void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
1304 u32 type)
1305{
1306 u32 i, npipes;
1307 struct mdss_mdp_pipe *pipe;
1308 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1309 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
1310
1311 switch (type) {
1312 case MDSS_MDP_PIPE_TYPE_VIG:
1313 pipe = mdata->vig_pipes;
1314 npipes = mdata->nvig_pipes;
1315 break;
1316 case MDSS_MDP_PIPE_TYPE_RGB:
1317 pipe = mdata->rgb_pipes;
1318 npipes = mdata->nrgb_pipes;
1319 break;
1320 case MDSS_MDP_PIPE_TYPE_DMA:
1321 pipe = mdata->dma_pipes;
1322 npipes = mdata->ndma_pipes;
1323 break;
1324 default:
1325 return;
1326 }
1327
1328 for (i = 0; i < npipes; i++) {
1329 /* only check for first rect and ignore additional */
1330 if (pipe->is_handed_off) {
1331 pr_debug("Unmapping handed off pipe %d\n", pipe->num);
1332 list_move(&pipe->list, &mdp5_data->pipes_cleanup);
1333 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
1334 pipe->is_handed_off = false;
1335 }
1336 pipe += pipe->multirect.max_rects;
1337 }
1338}
1339
1340/**
1341 * mdss_mdp_overlay_start() - Programs the MDP control data path to hardware
1342 * @mfd: Msm frame buffer structure associated with fb device.
1343 *
1344 * Program the MDP hardware with the control settings for the framebuffer
1345 * device. In addition to this, this function also handles the transition
1346 * from the the splash screen to the android boot animation when the
1347 * continuous splash screen feature is enabled.
1348 */
1349int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
1350{
1351 int rc;
1352 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1353 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
1354 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
1355
1356 if (mdss_mdp_ctl_is_power_on(ctl)) {
1357 if (!mdp5_data->mdata->batfet)
1358 mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
1359 mdss_mdp_release_splash_pipe(mfd);
1360 return 0;
1361 } else if (mfd->panel_info->cont_splash_enabled) {
1362 if (mdp5_data->allow_kickoff) {
1363 mdp5_data->allow_kickoff = false;
1364 } else {
1365 mutex_lock(&mdp5_data->list_lock);
1366 rc = list_empty(&mdp5_data->pipes_used);
1367 mutex_unlock(&mdp5_data->list_lock);
1368 if (rc) {
1369 pr_debug("empty kickoff on fb%d during cont splash\n",
1370 mfd->index);
1371 return -EPERM;
1372 }
1373 }
1374 } else if (mdata->handoff_pending) {
1375 pr_warn("fb%d: commit while splash handoff pending\n",
1376 mfd->index);
1377 return -EPERM;
1378 }
1379
1380 pr_debug("starting fb%d overlay\n", mfd->index);
1381
1382 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
1383
1384 /*
1385 * If idle pc feature is not enabled, then get a reference to the
1386 * runtime device which will be released when overlay is turned off
1387 */
1388 if (!mdp5_data->mdata->idle_pc_enabled ||
1389 (mfd->panel_info->type != MIPI_CMD_PANEL)) {
1390 rc = pm_runtime_get_sync(&mfd->pdev->dev);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301391 if (IS_ERR_VALUE((unsigned long)rc)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301392 pr_err("unable to resume with pm_runtime_get_sync rc=%d\n",
1393 rc);
1394 goto end;
1395 }
1396 }
1397
1398 /*
1399 * We need to do hw init before any hw programming.
1400 * Also, hw init involves programming the VBIF registers which
1401 * should be done only after attaching IOMMU which in turn would call
1402 * in to TZ to restore security configs on the VBIF registers.
1403 * This is not needed when continuous splash screen is enabled since
1404 * we would have called in to TZ to restore security configs from LK.
1405 */
1406 if (!mfd->panel_info->cont_splash_enabled) {
1407 rc = mdss_iommu_ctrl(1);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301408 if (IS_ERR_VALUE((unsigned long)rc)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301409 pr_err("iommu attach failed rc=%d\n", rc);
1410 goto end;
1411 }
1412 mdss_hw_init(mdss_res);
1413 mdss_iommu_ctrl(0);
1414 }
1415
1416 /*
1417 * Increment the overlay active count prior to calling ctl_start.
1418 * This is needed to ensure that if idle power collapse kicks in
1419 * right away, it would be handled correctly.
1420 */
1421 atomic_inc(&mdp5_data->mdata->active_intf_cnt);
1422 rc = mdss_mdp_ctl_start(ctl, false);
1423 if (rc == 0) {
1424 mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
1425 &mfd->mdp_sync_pt_data.notifier);
1426 } else {
1427 pr_err("mdp ctl start failed.\n");
1428 goto ctl_error;
1429 }
1430
1431 /* Restore any previously configured PP features by resetting the dirty
1432 * bits for enabled features. The dirty bits will be consumed during the
1433 * first display commit when the PP hardware blocks are updated
1434 */
1435 rc = mdss_mdp_pp_resume(mfd);
1436 if (rc && (rc != -EPERM) && (rc != -ENODEV))
1437 pr_err("PP resume err %d\n", rc);
1438
1439 rc = mdss_mdp_splash_cleanup(mfd, true);
1440 if (!rc)
1441 goto end;
1442
1443ctl_error:
1444 mdss_mdp_ctl_destroy(ctl);
1445 atomic_dec(&mdp5_data->mdata->active_intf_cnt);
1446 mdp5_data->ctl = NULL;
1447end:
1448 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
1449 return rc;
1450}
1451
1452static void mdss_mdp_overlay_update_pm(struct mdss_overlay_private *mdp5_data)
1453{
1454 ktime_t wakeup_time;
1455
1456 if (!mdp5_data->cpu_pm_hdl)
1457 return;
1458
1459 if (mdss_mdp_display_wakeup_time(mdp5_data->ctl, &wakeup_time))
1460 return;
1461
1462 activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
1463}
1464
1465static void __unstage_pipe_and_clean_buf(struct msm_fb_data_type *mfd,
1466 struct mdss_mdp_pipe *pipe, struct mdss_mdp_data *buf)
1467{
1468
1469 pr_debug("unstaging pipe:%d rect:%d buf:%d\n",
1470 pipe->num, pipe->multirect.num, !buf);
1471 MDSS_XLOG(pipe->num, pipe->multirect.num, !buf);
1472 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
1473 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
1474 pipe->dirty = true;
1475
1476 if (buf)
1477 __pipe_buf_mark_cleanup(mfd, buf);
1478}
1479
/*
 * __overlay_queue_pipes() - program and queue buffers for all pipes in use
 * @mfd: framebuffer device being committed
 *
 * Walks mdp5_data->pipes_used and, per pipe, picks the buffer to display
 * (new READY buffer, re-queued ACTIVE buffer, or none for solid fill) and
 * queues it. Pipes that cannot be programmed are unstaged and marked dirty
 * instead of failing the whole commit.
 *
 * NOTE(review): the final statement always returns 0; per-pipe queue errors
 * are handled in place by unstaging, so callers cannot observe them. Only
 * the DMA-pipe reset path returns a real error code.
 */
static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_mdp_ctl *tmp;
	int ret = 0;

	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
		struct mdss_mdp_data *buf;

		/* dirty pipes failed an earlier step; leave them alone */
		if (pipe->dirty) {
			pr_err("fb%d: pipe %d dirty! skipping configuration\n",
					mfd->index, pipe->num);
			continue;
		}

		/*
		 * When secure display is enabled, if there is a non secure
		 * display pipe, skip that
		 */
		if (mdss_get_sd_client_cnt() &&
			!(pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
			pr_warn("Non secure pipe during secure display: %u: %08X, skip\n",
					pipe->num, pipe->flags);
			continue;
		}
		/*
		 * When external is connected and no dedicated wfd is present,
		 * reprogram DMA pipe before kickoff to clear out any previous
		 * block mode configuration.
		 */
		if ((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
		    (ctl->shared_lock &&
		    (ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))) {
			if (ctl->mdata->mixer_switched) {
				ret = mdss_mdp_overlay_pipe_setup(mfd,
					&pipe->req_data, &pipe, NULL, false);
				pr_debug("resetting DMA pipe for ctl=%d",
					ctl->num);
			}
			if (ret) {
				pr_err("can't reset DMA pipe ret=%d ctl=%d\n",
					ret, ctl->num);
				return ret;
			}

			/* rebind pipe to the line-mode writeback ctl mixer */
			tmp = mdss_mdp_ctl_mixer_switch(ctl,
					MDSS_MDP_WB_CTL_TYPE_LINE);
			if (!tmp)
				return -EINVAL;
			pipe->mixer_left = mdss_mdp_mixer_get(tmp,
					MDSS_MDP_MIXER_MUX_DEFAULT);
		}

		/* head of this pipe's pending buffer queue, if any */
		buf = list_first_entry_or_null(&pipe->buf_queue,
				struct mdss_mdp_data, pipe_list);
		if (buf) {
			switch (buf->state) {
			case MDP_BUF_STATE_READY:
				pr_debug("pnum=%d buf=%pK first buffer ready\n",
						pipe->num, buf);
				break;
			case MDP_BUF_STATE_ACTIVE:
				if (list_is_last(&buf->pipe_list,
						&pipe->buf_queue)) {
					pr_debug("pnum=%d no buf update\n",
							pipe->num);
				} else {
					struct mdss_mdp_data *tmp = buf;
					/*
					 * buffer flip, new buffer will
					 * replace currently active one,
					 * mark currently active for cleanup
					 */
					buf = list_next_entry(tmp, pipe_list);
					__pipe_buf_mark_cleanup(mfd, tmp);
				}
				break;
			default:
				pr_err("invalid state of buf %pK=%d\n",
						buf, buf->state);
				WARN_ON(1);
				break;
			}
		}

		/* ensure pipes are reconfigured after power off/on */
		if (ctl->play_cnt == 0)
			pipe->params_changed++;

		if (buf && (buf->state == MDP_BUF_STATE_READY)) {
			/* fresh buffer: map it for display */
			buf->state = MDP_BUF_STATE_ACTIVE;
			ret = mdss_mdp_data_map(buf, false, DMA_TO_DEVICE);
		} else if (!pipe->params_changed &&
			   !mdss_mdp_is_roi_changed(pipe->mfd)) {

			/*
			 * no update for the given pipe nor any change in the
			 * ROI so skip pipe programming and continue with next.
			 */
			continue;
		} else if (buf) {
			WARN_ON(buf->state != MDP_BUF_STATE_ACTIVE);
			pr_debug("requeueing active buffer on pnum=%d\n",
					pipe->num);
		} else if ((pipe->flags & MDP_SOLID_FILL) == 0) {
			/* no buffer and not solid fill: invalid commit */
			pr_warn("commit without buffer on pipe %d\n",
				pipe->num);
			ret = -EINVAL;
		}
		/*
		 * if we reach here without errors and buf == NULL
		 * then solid fill will be set
		 */
		if (!IS_ERR_VALUE((unsigned long)ret))
			ret = mdss_mdp_pipe_queue_data(pipe, buf);

		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_warn("Unable to queue data for pnum=%d rect=%d\n",
					pipe->num, pipe->multirect.num);

			/*
			 * If we fail for a multi-rect pipe, unstage both rects
			 * so we don't leave the pipe configured in multi-rect
			 * mode with only one rectangle staged.
			 */
			if (pipe->multirect.mode !=
					MDSS_MDP_PIPE_MULTIRECT_NONE) {
				struct mdss_mdp_pipe *next_pipe =
					(struct mdss_mdp_pipe *)
					pipe->multirect.next;

				if (next_pipe) {
					struct mdss_mdp_data *next_buf =
						list_first_entry_or_null(
							&next_pipe->buf_queue,
							struct mdss_mdp_data,
							pipe_list);

					__unstage_pipe_and_clean_buf(mfd,
							next_pipe, next_buf);
				} else {
					pr_warn("cannot find rect pnum=%d\n",
							pipe->num);
				}
			}

			__unstage_pipe_and_clean_buf(mfd, pipe, buf);
		}
	}

	return 0;
}
1634
/*
 * Recovery sequence used when a pipe failed to halt: commit the current
 * (recovery/solid-fill) state and wait for it, then unstage everything,
 * re-queue the pipes currently in use and commit once more so the next
 * frame reflects the real configuration.
 */
static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	/* flush the recovery state and wait for it to land */
	mdss_mdp_display_commit(ctl, NULL, NULL);
	mdss_mdp_display_wait4comp(ctl);

	/* unstage any recovery pipes and re-queue used pipes */
	mdss_mdp_mixer_unstage_all(ctl->mixer_left);
	mdss_mdp_mixer_unstage_all(ctl->mixer_right);

	__overlay_queue_pipes(mfd);

	mdss_mdp_display_commit(ctl, NULL, NULL);
	mdss_mdp_display_wait4comp(ctl);
}
1651
1652static int mdss_mdp_commit_cb(enum mdp_commit_stage_type commit_stage,
1653 void *data)
1654{
1655 int ret = 0;
1656 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
1657 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1658 struct mdss_mdp_ctl *ctl;
1659
1660 switch (commit_stage) {
1661 case MDP_COMMIT_STAGE_SETUP_DONE:
1662 ctl = mfd_to_ctl(mfd);
1663 mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
1664 mdp5_data->kickoff_released = true;
1665 mutex_unlock(&mdp5_data->ov_lock);
1666 break;
1667 case MDP_COMMIT_STAGE_READY_FOR_KICKOFF:
1668 mutex_lock(&mdp5_data->ov_lock);
1669 break;
1670 default:
1671 pr_err("Invalid commit stage %x", commit_stage);
1672 break;
1673 }
1674
1675 return ret;
1676}
1677
1678/**
1679 * __is_roi_valid() - Check if ctl roi is valid for a given pipe.
1680 * @pipe: pipe to check against.
1681 * @l_roi: roi of the left ctl path.
1682 * @r_roi: roi of the right ctl path.
1683 *
1684 * Validate roi against pipe's destination rectangle by checking following
1685 * conditions. If any of these conditions are met then return failure,
1686 * success otherwise.
1687 *
1688 * 1. Pipe has scaling and pipe's destination is intersecting with roi.
1689 * 2. Pipe's destination and roi do not overlap, In such cases, pipe should
1690 * not be part of used list and should have been omitted by user program.
1691 */
1692static bool __is_roi_valid(struct mdss_mdp_pipe *pipe,
1693 struct mdss_rect *l_roi, struct mdss_rect *r_roi)
1694{
1695 bool ret = true;
1696 bool is_right_mixer = pipe->mixer_left->is_right_mixer;
1697 struct mdss_rect roi = is_right_mixer ? *r_roi : *l_roi;
1698 struct mdss_rect dst = pipe->dst;
1699 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1700 u32 left_lm_w = left_lm_w_from_mfd(pipe->mfd);
1701
1702 if (pipe->src_split_req) {
1703 if (roi.w) {
1704 /* left_roi is valid */
1705 roi.w += r_roi->w;
1706 } else {
1707 /*
1708 * if we come here then left_roi is zero but pipe's
1709 * output is crossing LM boundary if it was Full Screen
1710 * update. In such case, if right ROI's (x+w) is less
1711 * than pipe's dst_x then #2 check will fail even
1712 * though in full coordinate system it is valid.
1713 * ex:
1714 * left_lm_w = 800;
1715 * pipe->dst.x = 400;
1716 * pipe->dst.w = 800;
1717 * r_roi.x + r_roi.w = 300;
1718 * To avoid such pitfall, extend ROI for comparison.
1719 */
1720 roi.w += left_lm_w + r_roi->w;
1721 }
1722 }
1723
1724 if (mdata->has_src_split && is_right_mixer)
1725 dst.x -= left_lm_w;
1726
1727 /* condition #1 above */
1728 if ((pipe->scaler.enable) ||
1729 (pipe->src.w != dst.w) || (pipe->src.h != dst.h)) {
1730 struct mdss_rect res;
1731
1732 mdss_mdp_intersect_rect(&res, &dst, &roi);
1733
1734 if (!mdss_rect_cmp(&res, &dst)) {
1735 pr_err("error. pipe%d has scaling and its output is interesecting with roi.\n",
1736 pipe->num);
1737 pr_err("pipe_dst:-> %d %d %d %d roi:-> %d %d %d %d\n",
1738 dst.x, dst.y, dst.w, dst.h,
1739 roi.x, roi.y, roi.w, roi.h);
1740 ret = false;
1741 goto end;
1742 }
1743 }
1744
1745 /* condition #2 above */
1746 if (!mdss_rect_overlap_check(&dst, &roi)) {
1747 pr_err("error. pipe%d's output is outside of ROI.\n",
1748 pipe->num);
1749 ret = false;
1750 }
1751end:
1752 return ret;
1753}
1754
1755int mdss_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
1756{
1757 struct mdss_rect l_roi, r_roi;
1758 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
1759 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1760 struct mdss_mdp_ctl *sctl;
1761 int rc = 0;
1762
1763 pr_debug("fb%d switch to mode=%x\n", mfd->index, mode);
1764 ATRACE_FUNC();
1765
1766 ctl->pending_mode_switch = mode;
1767 sctl = mdss_mdp_get_split_ctl(ctl);
1768 if (sctl)
1769 sctl->pending_mode_switch = mode;
1770
1771 /* No need for mode validation. It has been done in ioctl call */
1772 if (mode == SWITCH_RESOLUTION) {
1773 if (ctl->ops.reconfigure) {
1774 /* wait for previous frame to complete before switch */
1775 if (ctl->ops.wait_pingpong)
1776 rc = ctl->ops.wait_pingpong(ctl, NULL);
1777 if (!rc && sctl && sctl->ops.wait_pingpong)
1778 rc = sctl->ops.wait_pingpong(sctl, NULL);
1779 if (rc) {
1780 pr_err("wait for pp failed before resolution switch\n");
1781 return rc;
1782 }
1783
1784 /*
1785 * Configure the mixer parameters before the switch as
1786 * the DSC parameter calculation is based on the mixer
1787 * ROI. And set it to full ROI as driver expects the
1788 * first frame after the resolution switch to be a
1789 * full frame update.
1790 */
1791 if (ctl->mixer_left) {
1792 l_roi = (struct mdss_rect) {0, 0,
1793 ctl->mixer_left->width,
1794 ctl->mixer_left->height};
1795 ctl->mixer_left->roi_changed = true;
1796 ctl->mixer_left->valid_roi = true;
1797 }
1798 if (ctl->mixer_right) {
1799 r_roi = (struct mdss_rect) {0, 0,
1800 ctl->mixer_right->width,
1801 ctl->mixer_right->height};
1802 ctl->mixer_right->roi_changed = true;
1803 ctl->mixer_right->valid_roi = true;
1804 }
1805 mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
1806
1807 mutex_lock(&mdp5_data->ov_lock);
1808 ctl->ops.reconfigure(ctl, mode, 1);
1809 mutex_unlock(&mdp5_data->ov_lock);
1810 /*
1811 * For Video mode panels, reconfigure is not defined.
1812 * So doing an explicit ctrl stop during resolution switch
1813 * to balance the ctrl start at the end of this function.
1814 */
1815 } else {
1816 mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
1817 }
1818 } else if (mode == MIPI_CMD_PANEL) {
1819 /*
1820 * Need to reset roi if there was partial update in previous
1821 * Command frame
1822 */
1823 l_roi = (struct mdss_rect){0, 0,
1824 ctl->mixer_left->width,
1825 ctl->mixer_left->height};
1826 if (ctl->mixer_right) {
1827 r_roi = (struct mdss_rect) {0, 0,
1828 ctl->mixer_right->width,
1829 ctl->mixer_right->height};
1830 }
1831 mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
1832 mdss_mdp_switch_roi_reset(ctl);
1833
1834 mdss_mdp_switch_to_cmd_mode(ctl, 1);
1835 mdss_mdp_update_panel_info(mfd, 1, 0);
1836 mdss_mdp_switch_to_cmd_mode(ctl, 0);
1837 mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
1838 } else if (mode == MIPI_VIDEO_PANEL) {
1839 if (ctl->ops.wait_pingpong)
1840 rc = ctl->ops.wait_pingpong(ctl, NULL);
1841 mdss_mdp_update_panel_info(mfd, 0, 0);
1842 mdss_mdp_switch_to_vid_mode(ctl, 1);
1843 mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
1844 mdss_mdp_switch_to_vid_mode(ctl, 0);
1845 } else {
1846 pr_err("Invalid mode switch arg %d\n", mode);
1847 return -EINVAL;
1848 }
1849
1850 mdss_mdp_ctl_start(ctl, true);
1851 ATRACE_END(__func__);
1852
1853 return 0;
1854}
1855
/*
 * mdss_mode_switch_post() - second phase of a panel mode switch
 * @mfd: framebuffer device being switched
 * @mode: MIPI_VIDEO_PANEL, MIPI_CMD_PANEL or SWITCH_RESOLUTION
 *
 * Performs the panel-side work that must follow mdss_mode_switch(): sends
 * the dynamic-switch DCS for video mode, rebalances DSI clock refcounts
 * for command mode, or completes the deferred reconfigure for a
 * resolution switch. Clears pending_mode_switch on both ctl paths.
 */
int mdss_mode_switch_post(struct msm_fb_data_type *mfd, u32 mode)
{
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
	struct dsi_panel_clk_ctrl clk_ctrl;
	int rc = 0;
	u32 frame_rate = 0;

	if (mode == MIPI_VIDEO_PANEL) {
		/*
		 * Need to make sure one frame has been sent in
		 * video mode prior to issuing the mode switch
		 * DCS to panel.
		 */
		frame_rate = mdss_panel_get_framerate
			(&(ctl->panel_data->panel_info),
			FPS_RESOLUTION_HZ);
		/* clamp implausible panel rates to a safe 24fps default */
		if (!(frame_rate >= 24 && frame_rate <= 240))
			frame_rate = 24;
		/* sleep one frame period (ms) plus 1ms margin */
		frame_rate = ((1000/frame_rate) + 1);
		msleep(frame_rate);

		pr_debug("%s, start\n", __func__);
		rc = mdss_mdp_ctl_intf_event(ctl,
			MDSS_EVENT_DSI_DYNAMIC_SWITCH,
			(void *) MIPI_VIDEO_PANEL, CTL_INTF_EVENT_FLAG_DEFAULT);
		pr_debug("%s, end\n", __func__);
	} else if (mode == MIPI_CMD_PANEL) {
		/*
		 * Needed to balance out clk refcount when going
		 * from video to command. This allows for idle
		 * power collapse to work as intended.
		 */
		clk_ctrl.state = MDSS_DSI_CLK_OFF;
		clk_ctrl.client = DSI_CLK_REQ_DSI_CLIENT;
		if (sctl)
			mdss_mdp_ctl_intf_event(sctl,
				MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl,
				CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);

		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
			(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
	} else if (mode == SWITCH_RESOLUTION) {
		/* complete the reconfigure deferred in mdss_mode_switch() */
		if (ctl->ops.reconfigure)
			rc = ctl->ops.reconfigure(ctl, mode, 0);
	}
	/* switch complete: clear pending flag on both ctl paths */
	ctl->pending_mode_switch = 0;
	if (sctl)
		sctl->pending_mode_switch = 0;

	return rc;
}
1908
/*
 * __validate_and_set_roi() - derive and program the partial-update ROI
 * @mfd: framebuffer device being committed
 * @commit: commit payload carrying the user l/r ROIs (may be NULL)
 *
 * Validates the user-supplied ROIs against HW constraints and every staged
 * pipe; on any violation, or when partial update is disabled/unsupported,
 * falls back to a full-screen ROI before programming it into the ctl.
 */
static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
	struct mdp_display_commit *commit)
{
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_rect l_roi = {0}, r_roi = {0};
	struct mdp_rect tmp_roi = {0};
	bool skip_partial_update = true;

	if (!commit)
		goto set_roi;

	/* both ROIs zero: caller did not request a partial update */
	if (!memcmp(&commit->l_roi, &tmp_roi, sizeof(tmp_roi)) &&
	    !memcmp(&commit->r_roi, &tmp_roi, sizeof(tmp_roi)))
		goto set_roi;

	rect_copy_mdp_to_mdss(&commit->l_roi, &l_roi);
	rect_copy_mdp_to_mdss(&commit->r_roi, &r_roi);

	pr_debug("input: l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
		r_roi.x, r_roi.y, r_roi.w, r_roi.h);

	/*
	 * Configure full ROI
	 * - If partial update is disabled
	 * - If it is the first frame update after dynamic resolution switch
	 */
	if (!ctl->panel_data->panel_info.partial_update_enabled
			|| (ctl->pending_mode_switch == SWITCH_RESOLUTION))
		goto set_roi;

	skip_partial_update = false;

	if (is_split_lm(mfd) && mdp5_data->mdata->has_src_split) {
		u32 left_lm_w = left_lm_w_from_mfd(mfd);
		struct mdss_rect merged_roi = l_roi;

		/*
		 * When source split is enabled on split LM displays,
		 * user program merges left and right ROI and sends
		 * it through l_roi. Split this merged ROI into
		 * left/right ROI for validation.
		 */
		mdss_rect_split(&merged_roi, &l_roi, &r_roi, left_lm_w);

		/*
		 * When source split is enabled on split LM displays,
		 * it is a HW requirement that both LM have same width
		 * if update is on both sides. Since ROIs are
		 * generated by user-land program, validate against
		 * this requirement.
		 */
		if (l_roi.w && r_roi.w && (l_roi.w != r_roi.w)) {
			pr_err("error. ROI's do not match. violating src_split requirement\n");
			pr_err("l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
				l_roi.x, l_roi.y, l_roi.w, l_roi.h,
				r_roi.x, r_roi.y, r_roi.w, r_roi.h);
			skip_partial_update = true;
			goto set_roi;
		}
	}

	/* every staged pipe must be compatible with the requested ROI */
	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
		if (!__is_roi_valid(pipe, &l_roi, &r_roi)) {
			skip_partial_update = true;
			pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d\n",
				pipe->num,
				pipe->dst.x, pipe->dst.y,
				pipe->dst.w, pipe->dst.h);
			break;
		}
	}

set_roi:
	if (skip_partial_update) {
		/* fall back to full-screen ROI on both mixers */
		l_roi = (struct mdss_rect){0, 0,
				ctl->mixer_left->width,
				ctl->mixer_left->height};
		if (ctl->mixer_right) {
			r_roi = (struct mdss_rect) {0, 0,
					ctl->mixer_right->width,
					ctl->mixer_right->height};
		}
	}

	pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
		(l_roi.w && l_roi.h && r_roi.w && r_roi.h) ? "left+right" :
		((l_roi.w && l_roi.h) ? "left-only" : "right-only"),
		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
		r_roi.x, r_roi.y, r_roi.w, r_roi.h);

	mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
}
2004
2005static bool __is_supported_candence(int cadence)
2006{
2007 return (cadence == FRC_CADENCE_22) ||
2008 (cadence == FRC_CADENCE_23) ||
2009 (cadence == FRC_CADENCE_23223);
2010}
2011
2012/* compute how many vsyncs between these 2 timestamp */
2013static int __compute_vsync_diff(s64 cur_ts,
2014 s64 base_ts, int display_fp1000s)
2015{
2016 int vsync_diff;
2017 int round_up = 0;
2018 s64 ts_diff = (cur_ts - base_ts) * display_fp1000s;
2019
2020 do_div(ts_diff, 1000000);
2021 vsync_diff = (int)ts_diff;
2022 /*
2023 * In most case DIV_ROUND_UP_ULL is enough, but calculation might be
2024 * impacted by possible jitter when vsync_diff is close to boundaries.
2025 * E.g., we have 30fps like 12.0->13.998->15.999->18.0->19.998->21.999
2026 * and 7460.001->7462.002->7464.0->7466.001->7468.002. DIV_ROUND_UP_ULL
2027 * fails in the later case.
2028 */
2029 round_up = ((vsync_diff % 1000) >= 900) ? 1 : 0;
2030 /* round up vsync count to accommodate fractions: base & diff */
2031 vsync_diff = (vsync_diff / 1000) + round_up + 1;
2032 return vsync_diff;
2033}
2034
2035static bool __validate_frc_info(struct mdss_mdp_frc_info *frc_info)
2036{
2037 struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
2038 struct mdss_mdp_frc_data *last_frc = &frc_info->last_frc;
2039 struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
2040
2041 pr_debug("frc: cur_fcnt=%d, cur_ts=%lld, last_fcnt=%d, last_ts=%lld, base_fcnt=%d, base_ts=%lld last_v_cnt=%d, last_repeat=%d base_v_cnt=%d\n",
2042 cur_frc->frame_cnt, cur_frc->timestamp,
2043 last_frc->frame_cnt, last_frc->timestamp,
2044 base_frc->frame_cnt, base_frc->timestamp,
2045 frc_info->last_vsync_cnt, frc_info->last_repeat,
2046 frc_info->base_vsync_cnt);
2047
2048 if ((cur_frc->frame_cnt == last_frc->frame_cnt) &&
2049 (cur_frc->timestamp == last_frc->timestamp)) {
2050 /* ignore repeated frame: video w/ UI layers */
2051 pr_debug("repeated frame input\n");
2052 return false;
2053 }
2054
2055 return true;
2056}
2057
2058static void __init_cadence_calc(struct mdss_mdp_frc_cadence_calc *calc)
2059{
2060 memset(calc, 0, sizeof(struct mdss_mdp_frc_cadence_calc));
2061}
2062
2063static int __calculate_cadence_id(struct mdss_mdp_frc_info *frc_info, int cnt)
2064{
2065 struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;
2066 struct mdss_mdp_frc_data *first = &calc->samples[0];
2067 struct mdss_mdp_frc_data *last = &calc->samples[cnt-1];
2068 s64 ts_diff =
2069 (last->timestamp - first->timestamp)
2070 * frc_info->display_fp1000s;
2071 u32 fcnt_diff =
2072 last->frame_cnt - first->frame_cnt;
2073 u32 fps_ratio;
2074 u32 cadence_id = FRC_CADENCE_NONE;
2075
2076 do_div(ts_diff, fcnt_diff);
2077 fps_ratio = (u32)ts_diff;
2078
2079 if ((fps_ratio > FRC_CADENCE_23_RATIO_LOW) &&
2080 (fps_ratio < FRC_CADENCE_23_RATIO_HIGH))
2081 cadence_id = FRC_CADENCE_23;
2082 else if ((fps_ratio > FRC_CADENCE_22_RATIO_LOW) &&
2083 (fps_ratio < FRC_CADENCE_22_RATIO_HIGH))
2084 cadence_id = FRC_CADENCE_22;
2085 else if ((fps_ratio > FRC_CADENCE_23223_RATIO_LOW) &&
2086 (fps_ratio < FRC_CADENCE_23223_RATIO_HIGH))
2087 cadence_id = FRC_CADENCE_23223;
2088
2089 pr_debug("frc: first=%lld, last=%lld, cnt=%d, fps_ratio=%u, cadence_id=%d\n",
2090 first->timestamp, last->timestamp, fcnt_diff,
2091 fps_ratio, cadence_id);
2092
2093 return cadence_id;
2094}
2095
2096static void __init_seq_gen(struct mdss_mdp_frc_seq_gen *gen, int cadence_id)
2097{
2098 int cadence22[2] = {2, 2};
2099 int cadence23[2] = {2, 3};
2100 int cadence23223[5] = {2, 3, 2, 2, 3};
2101 int *cadence = NULL;
2102 int len = 0;
2103
2104 memset(gen, 0, sizeof(struct mdss_mdp_frc_seq_gen));
2105 gen->pos = -EBADSLT;
2106 gen->base = -1;
2107
2108 switch (cadence_id) {
2109 case FRC_CADENCE_22:
2110 cadence = cadence22;
2111 len = 2;
2112 break;
2113 case FRC_CADENCE_23:
2114 cadence = cadence23;
2115 len = 2;
2116 break;
2117 case FRC_CADENCE_23223:
2118 cadence = cadence23223;
2119 len = 5;
2120 break;
2121 default:
2122 break;
2123 }
2124
2125 if (len > 0) {
2126 memcpy(gen->seq, cadence, len * sizeof(int));
2127 gen->len = len;
2128 gen->retry = 0;
2129 }
2130
2131 pr_debug("init sequence, cadence=%d len=%d\n", cadence_id, len);
2132}
2133
/*
 * Find the phase of the cadence sequence that lines up with the cached
 * repeat counts.
 *
 * Returns the position in gen->seq from which generation should continue,
 * -EBADSLT when no rotation of the sequence matches the cache, or 0 as a
 * fallback after too many failed attempts.
 */
static int __match_sequence(struct mdss_mdp_frc_seq_gen *gen)
{
	int pos, i;
	int len = gen->len;

	/* use default position if many attempts have failed */
	if (gen->retry++ >= FRC_CADENCE_SEQUENCE_MAX_RETRY)
		return 0;

	/* try each starting offset (rotation) of the cadence in turn */
	for (pos = 0; pos < len; pos++) {
		for (i = 0; i < len; i++) {
			/* cache index is shifted by len-1 relative to seq */
			if (gen->cache[(i+len-1) % len]
					!= gen->seq[(pos+i) % len])
				break;
		}
		/* inner loop ran to completion: every entry matched */
		if (i == len)
			return pos;
	}

	return -EBADSLT;
}
2155
2156static void __reset_cache(struct mdss_mdp_frc_seq_gen *gen)
2157{
2158 memset(gen->cache, 0, gen->len * sizeof(int));
2159 gen->base = -1;
2160}
2161
2162static void __cache_last(struct mdss_mdp_frc_seq_gen *gen, int expected_vsync)
2163{
2164 int i = 0;
2165
2166 /* only cache last in case of pre-defined cadence */
2167 if ((gen->pos < 0) && (gen->len > 0)) {
2168 /* set first sample's expected vsync as base */
2169 if (gen->base < 0) {
2170 gen->base = expected_vsync;
2171 return;
2172 }
2173
2174 /* cache is 0 if not filled */
2175 while (gen->cache[i] && (i < gen->len))
2176 i++;
2177
2178 gen->cache[i] = expected_vsync - gen->base;
2179 gen->base = expected_vsync;
2180
2181 if (i == (gen->len - 1)) {
2182 /* find init pos in sequence when cache is full */
2183 gen->pos = __match_sequence(gen);
2184 /* reset cache and re-collect samples for matching */
2185 if (gen->pos < 0)
2186 __reset_cache(gen);
2187 }
2188 }
2189}
2190
2191static inline bool __is_seq_gen_matched(struct mdss_mdp_frc_seq_gen *gen)
2192{
2193 return (gen->len > 0) && (gen->pos >= 0);
2194}
2195
2196static int __expected_repeat(struct mdss_mdp_frc_seq_gen *gen)
2197{
2198 int next_repeat = -1;
2199
2200 if (__is_seq_gen_matched(gen)) {
2201 next_repeat = gen->seq[gen->pos];
2202 gen->pos = (gen->pos + 1) % gen->len;
2203 }
2204
2205 return next_repeat;
2206}
2207
2208static bool __is_display_fps_changed(struct msm_fb_data_type *mfd,
2209 struct mdss_mdp_frc_info *frc_info)
2210{
2211 bool display_fps_changed = false;
2212 u32 display_fp1000s = mdss_panel_get_framerate(mfd->panel_info,
2213 FPS_RESOLUTION_KHZ);
2214
2215 if (frc_info->display_fp1000s != display_fp1000s) {
2216 pr_debug("fps changes from %d to %d\n",
2217 frc_info->display_fp1000s, display_fp1000s);
2218 display_fps_changed = true;
2219 }
2220
2221 return display_fps_changed;
2222}
2223
/*
 * Detect a video frame-rate change by comparing the elapsed time over the
 * last FRC_VIDEO_FPS_DETECT_WINDOW frames against the previous window's
 * elapsed time. Side effect: refreshes the per-window statistics
 * (frame_cnt/timestamp/last_delta) at the end of each window.
 */
static bool __is_video_fps_changed(struct mdss_mdp_frc_info *frc_info)
{
	bool video_fps_changed = false;

	/* only evaluate once a full detection window has elapsed */
	if ((frc_info->cur_frc.frame_cnt - frc_info->video_stat.frame_cnt)
			== FRC_VIDEO_FPS_DETECT_WINDOW) {
		s64 delta_t = frc_info->cur_frc.timestamp -
			frc_info->video_stat.timestamp;

		/* last_delta == 0 means no previous window to compare with */
		if (frc_info->video_stat.last_delta) {
			video_fps_changed =
				abs(delta_t - frc_info->video_stat.last_delta)
				> (FRC_VIDEO_FPS_CHANGE_THRESHOLD_US *
					FRC_VIDEO_FPS_DETECT_WINDOW);

			if (video_fps_changed)
				pr_info("video fps changed from [%d]%lld to [%d]%lld\n",
					frc_info->video_stat.frame_cnt,
					frc_info->video_stat.last_delta,
					frc_info->cur_frc.frame_cnt,
					delta_t);
		}

		/* roll the statistics window forward to the current frame */
		frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
		frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
		frc_info->video_stat.last_delta = delta_t;
	}

	return video_fps_changed;
}
2254
2255static bool __is_video_seeking(struct mdss_mdp_frc_info *frc_info)
2256{
2257 s64 ts_diff =
2258 frc_info->cur_frc.timestamp - frc_info->last_frc.timestamp;
2259 bool video_seek = false;
2260
2261 video_seek = (ts_diff < 0)
2262 || (ts_diff > FRC_VIDEO_TS_DELTA_THRESHOLD_US);
2263
2264 if (video_seek)
2265 pr_debug("video seeking: %lld -> %lld\n",
2266 frc_info->last_frc.timestamp,
2267 frc_info->cur_frc.timestamp);
2268
2269 return video_seek;
2270}
2271
2272static bool __is_buffer_dropped(struct mdss_mdp_frc_info *frc_info)
2273{
2274 int buffer_drop_cnt
2275 = frc_info->cur_frc.frame_cnt - frc_info->last_frc.frame_cnt;
2276
2277 if (buffer_drop_cnt > 1) {
2278 struct mdss_mdp_frc_drop_stat *drop_stat = &frc_info->drop_stat;
2279
2280 /* collect dropping statistics */
2281 if (!drop_stat->drop_cnt)
2282 drop_stat->frame_cnt = frc_info->last_frc.frame_cnt;
2283
2284 drop_stat->drop_cnt++;
2285
2286 pr_info("video buffer drop from %d to %d\n",
2287 frc_info->last_frc.frame_cnt,
2288 frc_info->cur_frc.frame_cnt);
2289 }
2290 return buffer_drop_cnt > 1;
2291}
2292
2293static bool __is_too_many_drops(struct mdss_mdp_frc_info *frc_info)
2294{
2295 struct mdss_mdp_frc_drop_stat *drop_stat = &frc_info->drop_stat;
2296 bool too_many = false;
2297
2298 if (drop_stat->drop_cnt > FRC_MAX_VIDEO_DROPPING_CNT) {
2299 too_many = (frc_info->cur_frc.frame_cnt - drop_stat->frame_cnt
2300 < FRC_VIDEO_DROP_TOLERANCE_WINDOW);
2301 frc_info->drop_stat.drop_cnt = 0;
2302 }
2303
2304 return too_many;
2305}
2306
2307static bool __is_video_cnt_rollback(struct mdss_mdp_frc_info *frc_info)
2308{
2309 /* video frame_cnt is assumed to increase monotonically */
2310 bool video_rollback
2311 = (frc_info->cur_frc.frame_cnt < frc_info->last_frc.frame_cnt)
2312 || (frc_info->cur_frc.frame_cnt <
2313 frc_info->base_frc.frame_cnt);
2314
2315 if (video_rollback)
2316 pr_info("video frame_cnt rolls back from %d to %d\n",
2317 frc_info->last_frc.frame_cnt,
2318 frc_info->cur_frc.frame_cnt);
2319
2320 return video_rollback;
2321}
2322
2323static bool __is_video_pause(struct msm_fb_data_type *mfd,
2324 struct mdss_mdp_frc_info *frc_info)
2325{
2326 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2327 bool video_pause =
2328 (frc_info->cur_frc.frame_cnt - frc_info->last_frc.frame_cnt
2329 == 1)
2330 && (ctl->vsync_cnt - frc_info->last_vsync_cnt >
2331 FRC_VIDEO_PAUSE_THRESHOLD);
2332
2333 if (video_pause)
2334 pr_debug("video paused: vsync elapsed %d\n",
2335 ctl->vsync_cnt - frc_info->last_vsync_cnt);
2336
2337 return video_pause;
2338}
2339
2340/*
2341 * Workaround for some cases that video has the same timestamp for
2342 * different frame. E.g., video player might provide the same frame
2343 * twice to codec when seeking/flushing.
2344 */
2345static bool __is_timestamp_duplicated(struct mdss_mdp_frc_info *frc_info)
2346{
2347 bool ts_dup =
2348 (frc_info->cur_frc.frame_cnt != frc_info->last_frc.frame_cnt)
2349 && (frc_info->cur_frc.timestamp
2350 == frc_info->last_frc.timestamp);
2351
2352 if (ts_dup)
2353 pr_info("timestamp of frame %d and %d are duplicated\n",
2354 frc_info->last_frc.frame_cnt,
2355 frc_info->cur_frc.frame_cnt);
2356
2357 return ts_dup;
2358}
2359
2360static void __set_frc_base(struct msm_fb_data_type *mfd,
2361 struct mdss_mdp_frc_info *frc_info)
2362{
2363 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2364
2365 frc_info->base_vsync_cnt = ctl->vsync_cnt;
2366 frc_info->base_frc = frc_info->cur_frc;
2367 frc_info->last_frc = frc_info->cur_frc;
2368 frc_info->last_repeat = 0;
2369 frc_info->last_vsync_cnt = 0;
2370 frc_info->cadence_id = FRC_CADENCE_NONE;
2371 frc_info->video_stat.last_delta = 0;
2372 frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
2373 frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
2374 frc_info->display_fp1000s =
2375 mdss_panel_get_framerate(mfd->panel_info, FPS_RESOLUTION_KHZ);
2376
2377
2378 pr_debug("frc_base: vsync_cnt=%d frame_cnt=%d timestamp=%lld\n",
2379 frc_info->base_vsync_cnt, frc_info->cur_frc.frame_cnt,
2380 frc_info->cur_frc.timestamp);
2381}
2382
2383/* calculate when we'd like to kickoff current frame based on its timestamp */
2384static int __calculate_remaining_vsync(struct msm_fb_data_type *mfd,
2385 struct mdss_mdp_frc_info *frc_info)
2386{
2387 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2388 struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
2389 struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
2390 int vsync_diff, expected_vsync_cnt, remaining_vsync;
2391
2392 /* how many vsync intervals between current & base */
2393 vsync_diff = __compute_vsync_diff(cur_frc->timestamp,
2394 base_frc->timestamp, frc_info->display_fp1000s);
2395
2396 /* expected vsync where we'd like to kickoff current frame */
2397 expected_vsync_cnt = frc_info->base_vsync_cnt + vsync_diff;
2398 /* how many remaining vsync we need display till kickoff */
2399 remaining_vsync = expected_vsync_cnt - ctl->vsync_cnt;
2400
2401 pr_debug("frc: expected_vsync_cnt=%d, cur_vsync_cnt=%d, remaining=%d\n",
2402 expected_vsync_cnt, ctl->vsync_cnt, remaining_vsync);
2403
2404 return remaining_vsync;
2405}
2406
2407/* tune latency computed previously if possible jitter exists */
2408static int __tune_possible_jitter(struct msm_fb_data_type *mfd,
2409 struct mdss_mdp_frc_info *frc_info, int remaining_vsync)
2410{
2411 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2412 int cadence_id = frc_info->cadence_id;
2413 int remaining = remaining_vsync;
2414 int expected_repeat = __expected_repeat(&frc_info->gen);
2415
2416 if (cadence_id && (expected_repeat > 0)) {
2417 int expected_vsync_cnt = remaining + ctl->vsync_cnt;
2418 /* how many times current frame will be repeated */
2419 int cur_repeat = expected_vsync_cnt - frc_info->last_vsync_cnt;
2420
2421 remaining -= cur_repeat - expected_repeat;
2422 pr_debug("frc: tune vsync, input=%d, output=%d, last_repeat=%d, cur_repeat=%d, expected_repeat=%d\n",
2423 remaining_vsync, remaining, frc_info->last_repeat,
2424 cur_repeat, expected_repeat);
2425 }
2426
2427 return remaining;
2428}
2429
/* compute how many vsync we still need to wait for keeping cadence */
static int __calculate_remaining_repeat(struct msm_fb_data_type *mfd,
	struct mdss_mdp_frc_info *frc_info)
{
	return __tune_possible_jitter(mfd, frc_info,
		__calculate_remaining_vsync(mfd, frc_info));
}
2441
2442static int __repeat_current_frame(struct mdss_mdp_ctl *ctl, int repeat)
2443{
2444 int expected_vsync = ctl->vsync_cnt + repeat;
2445 int cnt = 0;
2446 int ret = 0;
2447
2448 while (ctl->vsync_cnt < expected_vsync) {
2449 cnt++;
2450 if (ctl->ops.wait_vsync_fnc) {
2451 ret = ctl->ops.wait_vsync_fnc(ctl);
2452 if (ret < 0)
2453 break;
2454 }
2455 }
2456
2457 if (ret)
2458 pr_err("wrong waiting: repeat %d, actual: %d\n", repeat, cnt);
2459
2460 return ret;
2461}
2462
2463static void __save_last_frc_info(struct mdss_mdp_ctl *ctl,
2464 struct mdss_mdp_frc_info *frc_info)
2465{
2466 /* save last data */
2467 frc_info->last_frc = frc_info->cur_frc;
2468 frc_info->last_repeat = ctl->vsync_cnt - frc_info->last_vsync_cnt;
2469 frc_info->last_vsync_cnt = ctl->vsync_cnt;
2470}
2471
2472static void cadence_detect_callback(struct mdss_mdp_frc_fsm *frc_fsm)
2473{
2474 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2475
2476 __init_cadence_calc(&frc_info->calc);
2477}
2478
2479static void seq_match_callback(struct mdss_mdp_frc_fsm *frc_fsm)
2480{
2481 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2482
2483 __init_seq_gen(&frc_info->gen, frc_info->cadence_id);
2484}
2485
2486static void frc_disable_callback(struct mdss_mdp_frc_fsm *frc_fsm)
2487{
2488 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2489
2490 frc_info->cadence_id = FRC_CADENCE_DISABLE;
2491}
2492
/* default behavior of FRC FSM */
static bool __is_frc_state_changed_in_default(struct msm_fb_data_type *mfd,
	struct mdss_mdp_frc_info *frc_info)
{
	/*
	 * Need change to INIT state in case of any of these changes:
	 *
	 * 1) video frame_cnt has been rolled back by codec.
	 * 2) video fast-foward or rewind. Sometimes video seeking might cause
	 *    buffer drop as well, so check seek ahead of buffer drop in order
	 *    to avoid duplicated check.
	 * 3) buffer drop.
	 * 4) display fps has changed.
	 * 5) video frame rate has changed.
	 * 6) video pauses. it could be considered as lag case.
	 * 7) duplicated timestamp of different frames which breaks FRC.
	 *
	 * Note the || chain short-circuits: helpers that also update their
	 * own statistics (drop / fps stats) only run when the earlier
	 * checks were all false.
	 */
	return (__is_video_cnt_rollback(frc_info) ||
		__is_video_seeking(frc_info) ||
		__is_buffer_dropped(frc_info) ||
		__is_display_fps_changed(mfd, frc_info) ||
		__is_video_fps_changed(frc_info) ||
		__is_video_pause(mfd, frc_info) ||
		__is_timestamp_duplicated(frc_info));
}
2518
2519static void __pre_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
2520{
2521 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2522 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2523
2524 if (__is_too_many_drops(frc_info)) {
2525 /*
2526 * disable frc when dropping too many buffers, this might happen
2527 * in some extreme cases like video is heavily loaded so any
2528 * extra latency could make things worse.
2529 */
2530 pr_info("disable frc because there're too many drops\n");
2531 mdss_mdp_frc_fsm_change_state(frc_fsm,
2532 FRC_STATE_DISABLE, frc_disable_callback);
2533 mdss_mdp_frc_fsm_update_state(frc_fsm);
2534 } else if (__is_frc_state_changed_in_default(mfd, frc_info)) {
2535 /* FRC status changed so reset to INIT state */
2536 mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
2537 mdss_mdp_frc_fsm_update_state(frc_fsm);
2538 }
2539}
2540
/* Default do_frc hook for states that have no per-frame work: a no-op. */
static void __do_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
{
}
2545
2546static void __post_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
2547{
2548 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2549 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2550 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2551
2552 __save_last_frc_info(ctl, frc_info);
2553
2554 /* update frc_fsm state to new state for the next round */
2555 mdss_mdp_frc_fsm_update_state(frc_fsm);
2556}
2557
2558/* behavior of FRC FSM in INIT state */
2559static void __do_frc_in_init_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
2560{
2561 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2562 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2563
2564 __set_frc_base(mfd, frc_info);
2565
2566 mdss_mdp_frc_fsm_change_state(frc_fsm,
2567 FRC_STATE_CADENCE_DETECT, cadence_detect_callback);
2568}
2569
/*
 * behavior of FRC FSM in CADENCE_DETECT state: accumulate frame samples,
 * then try each supported cadence in turn; transition to SEQ_MATCH on a
 * supported cadence, otherwise fall back to FREERUN.
 */
static void __do_frc_in_cadence_detect_state(struct mdss_mdp_frc_fsm *frc_fsm,
	void *arg)
{
	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
	struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;

	if (calc->sample_cnt < FRC_CADENCE_DETECT_WINDOW) {
		/* still filling the detection window */
		calc->samples[calc->sample_cnt++] = frc_info->cur_frc;
	} else {
		/*
		 * Get enough samples and check candence. FRC_CADENCE_23
		 * and FRC_CADENCE_22 need >= 2 deltas, and >= 5 deltas
		 * are necessary for computing FRC_CADENCE_23223.
		 */
		u32 cadence_id = FRC_CADENCE_23;
		/*
		 * Per-cadence sample counts, indexed by cadence id --
		 * presumably FRC_CADENCE_23/22/23223 are ids 1..3 here;
		 * TODO(review): confirm against the enum definition.
		 */
		u32 sample_cnt[FRC_MAX_SUPPORT_CADENCE] = {0, 5, 5, 6};

		while (cadence_id < FRC_CADENCE_FREE_RUN) {
			if (cadence_id ==
				__calculate_cadence_id(frc_info,
					sample_cnt[cadence_id]))
				break;
			cadence_id++;
		}

		frc_info->cadence_id = cadence_id;
		pr_info("frc: cadence_id=%d\n", cadence_id);

		/* detected supported cadence, start sequence match */
		if (__is_supported_candence(frc_info->cadence_id))
			mdss_mdp_frc_fsm_change_state(frc_fsm,
				FRC_STATE_SEQ_MATCH, seq_match_callback);
		else
			mdss_mdp_frc_fsm_change_state(frc_fsm,
				FRC_STATE_FREERUN, NULL);
	}
}
2608
2609/* behavior of FRC FSM in SEQ_MATCH state */
2610static void __do_frc_in_seq_match_state(struct mdss_mdp_frc_fsm *frc_fsm,
2611 void *arg)
2612{
2613 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2614 struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
2615 struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
2616 int vsync_diff;
2617
2618 /* how many vsync intervals between current & base */
2619 vsync_diff = __compute_vsync_diff(cur_frc->timestamp,
2620 base_frc->timestamp, frc_info->display_fp1000s);
2621
2622 /* cache vsync diff to compute start pos in cadence */
2623 __cache_last(&frc_info->gen, vsync_diff);
2624
2625 if (__is_seq_gen_matched(&frc_info->gen))
2626 mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_READY, NULL);
2627}
2628
2629/* behavior of FRC FSM in FREE_RUN state */
2630static bool __is_frc_state_changed_in_freerun_state(
2631 struct msm_fb_data_type *mfd,
2632 struct mdss_mdp_frc_info *frc_info)
2633{
2634 /*
2635 * Only need change to INIT state in case of 2 changes:
2636 *
2637 * 1) display fps has changed.
2638 * 2) video frame rate has changed.
2639 */
2640 return (__is_display_fps_changed(mfd, frc_info) ||
2641 __is_video_fps_changed(frc_info));
2642}
2643
2644static void __pre_frc_in_freerun_state(struct mdss_mdp_frc_fsm *frc_fsm,
2645 void *arg)
2646{
2647 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2648 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2649
2650 /* FRC status changed so reset to INIT state */
2651 if (__is_frc_state_changed_in_freerun_state(mfd, frc_info)) {
2652 /* update state to INIT immediately */
2653 mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
2654 mdss_mdp_frc_fsm_update_state(frc_fsm);
2655 }
2656}
2657
2658/* behavior of FRC FSM in READY state */
2659static void __do_frc_in_ready_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
2660{
2661 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2662 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2663 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2664 struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
2665
2666 int remaining_repeat =
2667 __calculate_remaining_repeat(mfd, frc_info);
2668
2669 mdss_debug_frc_add_kickoff_sample_pre(ctl, frc_info, remaining_repeat);
2670
2671 /* video arrives later than expected */
2672 if (remaining_repeat < 0) {
2673 pr_info("Frame %d lags behind %d vsync\n",
2674 cur_frc->frame_cnt, -remaining_repeat);
2675 mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
2676 remaining_repeat = 0;
2677 }
2678
2679 if (mdss_debug_frc_frame_repeat_disabled())
2680 remaining_repeat = 0;
2681
2682 __repeat_current_frame(ctl, remaining_repeat);
2683
2684 mdss_debug_frc_add_kickoff_sample_post(ctl, frc_info, remaining_repeat);
2685}
2686
/* behavior of FRC FSM in DISABLE state: no pre-commit work */
static void __pre_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
	void *arg)
{
}
2693
/* DISABLE state post-commit hook: nothing to record. */
static void __post_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
	void *arg)
{
}
2699
2700static int __config_secure_display(struct mdss_overlay_private *mdp5_data)
2701{
2702 int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
2703 int sd_enable = -1; /* Since 0 is a valid state, initialize with -1 */
2704 int ret = 0;
2705
2706 if (panel_type == MIPI_CMD_PANEL)
2707 mdss_mdp_display_wait4pingpong(mdp5_data->ctl, true);
2708
2709 /*
2710 * Start secure display session if we are transitioning from non secure
2711 * to secure display.
2712 */
2713 if (mdp5_data->sd_transition_state ==
2714 SD_TRANSITION_NON_SECURE_TO_SECURE)
2715 sd_enable = 1;
2716
2717 /*
2718 * For command mode panels, if we are trasitioning from secure to
2719 * non secure session, disable the secure display, as we've already
2720 * waited for the previous frame transfer.
2721 */
2722 if ((panel_type == MIPI_CMD_PANEL) &&
2723 (mdp5_data->sd_transition_state ==
2724 SD_TRANSITION_SECURE_TO_NON_SECURE))
2725 sd_enable = 0;
2726
2727 if (sd_enable != -1) {
2728 ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, sd_enable);
2729 if (!ret)
2730 mdp5_data->sd_enabled = sd_enable;
2731 }
2732
2733 return ret;
2734}
2735
/*
 * Predefined state table of the FRC FSM. Each state supplies three hooks
 * executed around every commit: pre_frc (input validation and forced
 * state changes), do_frc (per-state frame work) and post_frc (per-round
 * bookkeeping). States without specific work fall back to the shared
 * __*_in_default / no-op hooks above.
 */
static struct mdss_mdp_frc_fsm_state frc_fsm_states[FRC_STATE_MAX] = {
	{
		.name = "FRC_FSM_INIT",
		.state = FRC_STATE_INIT,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_init_state,
			.post_frc = __post_frc_in_default,
		},
	},

	{
		.name = "FRC_FSM_CADENCE_DETECT",
		.state = FRC_STATE_CADENCE_DETECT,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_cadence_detect_state,
			.post_frc = __post_frc_in_default,
		},
	},

	{
		.name = "FRC_FSM_SEQ_MATCH",
		.state = FRC_STATE_SEQ_MATCH,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_seq_match_state,
			.post_frc = __post_frc_in_default,
		},
	},

	{
		.name = "FRC_FSM_FREERUN",
		.state = FRC_STATE_FREERUN,
		.ops = {
			.pre_frc = __pre_frc_in_freerun_state,
			.do_frc = __do_frc_in_default,
			.post_frc = __post_frc_in_default,
		},
	},

	{
		.name = "FRC_FSM_READY",
		.state = FRC_STATE_READY,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_ready_state,
			.post_frc = __post_frc_in_default,
		},
	},

	{
		.name = "FRC_FSM_DISABLE",
		.state = FRC_STATE_DISABLE,
		.ops = {
			.pre_frc = __pre_frc_in_disable_state,
			.do_frc = __do_frc_in_default,
			.post_frc = __post_frc_in_disable_state,
		},
	},
};
2798
2799/*
2800 * FRC FSM operations:
2801 * mdss_mdp_frc_fsm_init_state: Init FSM state.
2802 * mdss_mdp_frc_fsm_change_state: Change FSM state. The desired state will not
2803 * be effective till update_state is called.
2804 * mdss_mdp_frc_fsm_update_state: Update FSM state. Changed state is effective
2805 * immediately once this function is called.
2806 */
2807void mdss_mdp_frc_fsm_init_state(struct mdss_mdp_frc_fsm *frc_fsm)
2808{
2809 pr_debug("frc_fsm: init frc fsm state\n");
2810 frc_fsm->state = frc_fsm->to_state = frc_fsm_states[FRC_STATE_INIT];
2811 memset(&frc_fsm->frc_info, 0, sizeof(struct mdss_mdp_frc_info));
2812}
2813
2814void mdss_mdp_frc_fsm_change_state(struct mdss_mdp_frc_fsm *frc_fsm,
2815 enum mdss_mdp_frc_state_type state,
2816 void (*cb)(struct mdss_mdp_frc_fsm *frc_fsm))
2817{
2818 if (state != frc_fsm->state.state) {
2819 pr_debug("frc_fsm: state changes from %s to %s\n",
2820 frc_fsm->state.name,
2821 frc_fsm_states[state].name);
2822 frc_fsm->to_state = frc_fsm_states[state];
2823 frc_fsm->cbs.update_state_cb = cb;
2824 }
2825}
2826
2827void mdss_mdp_frc_fsm_update_state(struct mdss_mdp_frc_fsm *frc_fsm)
2828{
2829 if (frc_fsm->to_state.state != frc_fsm->state.state) {
2830 pr_debug("frc_fsm: state updates from %s to %s\n",
2831 frc_fsm->state.name,
2832 frc_fsm->to_state.name);
2833
2834 if (frc_fsm->cbs.update_state_cb)
2835 frc_fsm->cbs.update_state_cb(frc_fsm);
2836
2837 frc_fsm->state = frc_fsm->to_state;
2838 }
2839}
2840
2841static void mdss_mdp_overlay_update_frc(struct msm_fb_data_type *mfd)
2842{
2843 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
2844 struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;
2845 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2846
2847 if (__validate_frc_info(frc_info)) {
2848 struct mdss_mdp_frc_fsm_state *state = &frc_fsm->state;
2849
2850 state->ops.pre_frc(frc_fsm, mfd);
2851 state->ops.do_frc(frc_fsm, mfd);
2852 state->ops.post_frc(frc_fsm, mfd);
2853 }
2854}
2855
/*
 * mdss_mdp_overlay_kickoff() - validate, program and commit one frame.
 *
 * Handles the whole commit path: secure-display transitions, unstaging of
 * cleanup pipes, SSPP programming, FRC repeat, the display commit itself,
 * post-commit fps update and cleanup. Lock ordering is shared_lock (if
 * any) -> ov_lock -> list_lock; all error paths past the clock vote fall
 * through commit_fail so the clock/iommu votes and locks are released.
 *
 * Returns 0 on success or a negative error code.
 */
int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
	struct mdp_display_commit *data)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe, *tmp;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	int ret = 0;
	struct mdss_mdp_commit_cb commit_cb;
	u8 sd_transition_state = 0;

	if (!ctl || !ctl->mixer_left)
		return -ENODEV;

	ATRACE_BEGIN(__func__);
	if (ctl->shared_lock) {
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
		mutex_lock(ctl->shared_lock);
	}

	mutex_lock(&mdp5_data->ov_lock);
	ctl->bw_pending = 0;
	ret = mdss_mdp_overlay_start(mfd);
	if (ret) {
		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
		mutex_unlock(&mdp5_data->ov_lock);
		if (ctl->shared_lock)
			mutex_unlock(ctl->shared_lock);
		return ret;
	}

	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("iommu attach failed rc=%d\n", ret);
		mutex_unlock(&mdp5_data->ov_lock);
		if (ctl->shared_lock)
			mutex_unlock(ctl->shared_lock);
		return ret;
	}
	mutex_lock(&mdp5_data->list_lock);

	/* FRAME_BEGIN was already sent above when shared_lock exists */
	if (!ctl->shared_lock)
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	mdss_mdp_check_ctl_reset_status(ctl);
	__validate_and_set_roi(mfd, data);

	if (ctl->ops.wait_pingpong && mdp5_data->mdata->serialize_wait4pp)
		mdss_mdp_display_wait4pingpong(ctl, true);

	/* snapshot the transition state; it is re-checked after commit */
	sd_transition_state = mdp5_data->sd_transition_state;
	if (sd_transition_state != SD_TRANSITION_NONE) {
		ret = __config_secure_display(mdp5_data);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_err("Secure session config failed\n");
			goto commit_fail;
		}
	}

	/*
	 * Setup pipe in solid fill before unstaging,
	 * to ensure no fetches are happening after dettach or reattach.
	 */
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
		pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
		list_move(&pipe->list, &mdp5_data->pipes_destroy);
	}

	/* call this function before any registers programming */
	if (ctl->ops.pre_programming)
		ctl->ops.pre_programming(ctl);

	ATRACE_BEGIN("sspp_programming");
	ret = __overlay_queue_pipes(mfd);
	ATRACE_END("sspp_programming");
	mutex_unlock(&mdp5_data->list_lock);

	mdp5_data->kickoff_released = false;

	/* hold the frame for extra vsyncs if FRC cadence requires it */
	if (mdp5_data->frc_fsm->enable)
		mdss_mdp_overlay_update_frc(mfd);

	if (mfd->panel.type == WRITEBACK_PANEL) {
		ATRACE_BEGIN("wb_kickoff");
		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
		commit_cb.data = mfd;
		ret = mdss_mdp_wfd_kickoff(mdp5_data->wfd, &commit_cb);
		ATRACE_END("wb_kickoff");
	} else {
		ATRACE_BEGIN("display_commit");
		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
		commit_cb.data = mfd;
		ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL,
			&commit_cb);
		ATRACE_END("display_commit");
	}
	__vsync_set_vsync_handler(mfd);

	/*
	 * release the commit pending flag; we are releasing this flag
	 * after the commit, since now the transaction status
	 * in the cmd mode controllers is busy.
	 */
	mfd->atomic_commit_pending = false;

	if (!mdp5_data->kickoff_released)
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);

	if (IS_ERR_VALUE((unsigned long)ret))
		goto commit_fail;

	mutex_unlock(&mdp5_data->ov_lock);
	mdss_mdp_overlay_update_pm(mdp5_data);

	ATRACE_BEGIN("display_wait4comp");
	ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
	ATRACE_END("display_wait4comp");
	mdss_mdp_splash_cleanup(mfd, true);

	/*
	 * Configure Timing Engine, if new fps was set.
	 * We need to do this after the wait for vsync
	 * to guarantee that mdp flush bit and dsi flush
	 * bit are set within the same vsync period
	 * regardless of mdp revision.
	 */
	ATRACE_BEGIN("fps_update");
	ret = mdss_mdp_ctl_update_fps(ctl);
	ATRACE_END("fps_update");

	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("failed to update fps!\n");
		goto commit_fail;
	}

	mutex_lock(&mdp5_data->ov_lock);
	/*
	 * If we are transitioning from secure to non-secure display,
	 * disable the secure display.
	 */
	if (mdp5_data->sd_enabled && (sd_transition_state ==
			SD_TRANSITION_SECURE_TO_NON_SECURE)) {
		ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, 0);
		if (!ret)
			mdp5_data->sd_enabled = 0;
	}

	mdss_fb_update_notify_update(mfd);
commit_fail:
	/* common unwind: cleanup pipes, drop clock/iommu votes, unlock */
	ATRACE_BEGIN("overlay_cleanup");
	mdss_mdp_overlay_cleanup(mfd, &mdp5_data->pipes_destroy);
	ATRACE_END("overlay_cleanup");
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
	if (!mdp5_data->kickoff_released)
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);

	mutex_unlock(&mdp5_data->ov_lock);
	if (ctl->shared_lock)
		mutex_unlock(ctl->shared_lock);
	mdss_iommu_ctrl(0);
	ATRACE_END(__func__);

	return ret;
}
3024
3025int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
3026{
3027 struct mdss_mdp_pipe *pipe, *tmp;
3028 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3029 u32 unset_ndx = 0;
3030
3031 mutex_lock(&mdp5_data->list_lock);
3032 list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
3033 if (pipe->ndx & ndx) {
3034 if (mdss_mdp_pipe_map(pipe)) {
3035 pr_err("Unable to map used pipe%d ndx=%x\n",
3036 pipe->num, pipe->ndx);
3037 continue;
3038 }
3039
3040 unset_ndx |= pipe->ndx;
3041
3042 pipe->file = NULL;
3043 list_move(&pipe->list, &mdp5_data->pipes_cleanup);
3044
3045 mdss_mdp_pipe_unmap(pipe);
3046
3047 if (unset_ndx == ndx)
3048 break;
3049 }
3050 }
3051 mutex_unlock(&mdp5_data->list_lock);
3052
3053 if (unset_ndx != ndx) {
3054 pr_warn("Unable to unset pipe(s) ndx=0x%x unset=0x%x\n",
3055 ndx, unset_ndx);
3056 return -ENOENT;
3057 }
3058
3059 return 0;
3060}
3061
3062static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
3063{
3064 int ret = 0;
3065 struct mdss_overlay_private *mdp5_data;
3066
3067 if (!mfd)
3068 return -ENODEV;
3069
3070 mdp5_data = mfd_to_mdp5_data(mfd);
3071
3072 if (!mdp5_data || !mdp5_data->ctl)
3073 return -ENODEV;
3074
3075 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
3076 if (ret)
3077 return ret;
3078
3079 if (ndx == BORDERFILL_NDX) {
3080 pr_debug("borderfill disable\n");
3081 mdp5_data->borderfill_enable = false;
3082 ret = 0;
3083 goto done;
3084 }
3085
3086 if (mdss_fb_is_power_off(mfd)) {
3087 ret = -EPERM;
3088 goto done;
3089 }
3090
3091 pr_debug("unset ndx=%x\n", ndx);
3092
3093 ret = mdss_mdp_overlay_release(mfd, ndx);
3094
3095done:
3096 mutex_unlock(&mdp5_data->ov_lock);
3097
3098 return ret;
3099}
3100
3101/**
3102 * mdss_mdp_overlay_release_all() - release any overlays associated with fb dev
3103 * @mfd: Msm frame buffer structure associated with fb device
3104 * @release_all: ignore pid and release all the pipes
3105 *
3106 * Release any resources allocated by calling process, this can be called
3107 * on fb_release to release any overlays/rotator sessions left open.
3108 *
3109 * Return number of resources released
3110 */
3111static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd,
3112 struct file *file)
3113{
3114 struct mdss_mdp_pipe *pipe, *tmp;
3115 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3116 u32 unset_ndx = 0;
3117 int cnt = 0;
3118
3119 pr_debug("releasing all resources for fb%d file:%pK\n",
3120 mfd->index, file);
3121
3122 mutex_lock(&mdp5_data->ov_lock);
3123 mutex_lock(&mdp5_data->list_lock);
3124 if (!mfd->ref_cnt && !list_empty(&mdp5_data->pipes_cleanup)) {
3125 pr_debug("fb%d:: free pipes present in cleanup list",
3126 mfd->index);
3127 cnt++;
3128 }
3129
3130 list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
3131 if (!file || pipe->file == file) {
3132 unset_ndx |= pipe->ndx;
3133 pipe->file = NULL;
3134 list_move(&pipe->list, &mdp5_data->pipes_cleanup);
3135 cnt++;
3136 }
3137 }
3138
3139 pr_debug("mfd->ref_cnt=%d unset_ndx=0x%x cnt=%d\n",
3140 mfd->ref_cnt, unset_ndx, cnt);
3141
3142 mutex_unlock(&mdp5_data->list_lock);
3143 mutex_unlock(&mdp5_data->ov_lock);
3144
3145 return cnt;
3146}
3147
/*
 * mdss_mdp_overlay_queue() - queue a user buffer on an existing overlay pipe
 * @mfd: framebuffer device owning the overlay
 * @req: overlay data request; req->id selects the pipe, req->data the buffer
 *
 * Looks up the pipe for req->id, allocates a source-buffer slot and imports
 * the user buffer into it for the next kickoff.  Returns 0 on success or a
 * negative errno (-ENODEV for a missing or dirty pipe, -ENOMEM, or the
 * map/import failure code).
 */
static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
				  struct msmfb_overlay_data *req)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_data *src_data;
	struct mdp_layer_buffer buffer;
	int ret;
	u32 flags;

	pipe = __overlay_find_pipe(mfd, req->id);
	if (!pipe) {
		pr_err("pipe ndx=%x doesn't exist\n", req->id);
		return -ENODEV;
	}

	/* dirty pipes are pending cleanup; refuse new buffers for them */
	if (pipe->dirty) {
		pr_warn("dirty pipe, will not queue pipe pnum=%d\n", pipe->num);
		return -ENODEV;
	}

	/* hold a reference so the pipe can't be destroyed while queuing */
	ret = mdss_mdp_pipe_map(pipe);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("Unable to map used pipe%d ndx=%x\n",
			pipe->num, pipe->ndx);
		return ret;
	}

	pr_debug("ov queue pnum=%d\n", pipe->num);

	if (pipe->flags & MDP_SOLID_FILL)
		pr_warn("Unexpected buffer queue to a solid fill pipe\n");

	/* only the secure-session flags are relevant for buffer import */
	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
		MDP_SECURE_DISPLAY_OVERLAY_SESSION));

	mutex_lock(&mdp5_data->list_lock);
	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
	if (!src_data) {
		pr_err("unable to allocate source buffer\n");
		ret = -ENOMEM;
	} else {
		/* validate the incoming buffer against the pipe geometry */
		buffer.width = pipe->img_width;
		buffer.height = pipe->img_height;
		buffer.format = pipe->src_fmt->format;
		ret = mdss_mdp_data_get_and_validate_size(src_data, &req->data,
			1, flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
			&buffer);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			mdss_mdp_overlay_buf_free(mfd, src_data);
			pr_err("src_data pmem error\n");
		}
	}
	mutex_unlock(&mdp5_data->list_lock);

	mdss_mdp_pipe_unmap(pipe);

	return ret;
}
3207
3208static int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
3209 struct msmfb_overlay_data *req)
3210{
3211 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3212 int ret = 0;
3213
3214 pr_debug("play req id=%x\n", req->id);
3215
3216 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
3217 if (ret)
3218 return ret;
3219
3220 if (mdss_fb_is_power_off(mfd)) {
3221 ret = -EPERM;
3222 goto done;
3223 }
3224
3225 if (req->id == BORDERFILL_NDX) {
3226 pr_debug("borderfill enable\n");
3227 mdp5_data->borderfill_enable = true;
3228 ret = mdss_mdp_overlay_free_fb_pipe(mfd);
3229 } else {
3230 ret = mdss_mdp_overlay_queue(mfd, req);
3231 }
3232
3233done:
3234 mutex_unlock(&mdp5_data->ov_lock);
3235
3236 return ret;
3237}
3238
3239static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd)
3240{
3241 struct mdss_mdp_pipe *pipe;
3242 u32 fb_ndx = 0;
3243 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3244
3245 pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
3246 MDSS_MDP_MIXER_MUX_LEFT, MDSS_MDP_STAGE_BASE, false);
3247 if (pipe)
3248 fb_ndx |= pipe->ndx;
3249
3250 pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
3251 MDSS_MDP_MIXER_MUX_RIGHT, MDSS_MDP_STAGE_BASE, false);
3252 if (pipe)
3253 fb_ndx |= pipe->ndx;
3254
3255 if (fb_ndx) {
3256 pr_debug("unstaging framebuffer pipes %x\n", fb_ndx);
3257 mdss_mdp_overlay_release(mfd, fb_ndx);
3258 }
3259 return 0;
3260}
3261
/*
 * mdss_mdp_overlay_get_fb_pipe() - get or create the base framebuffer pipe
 * @mfd: framebuffer device
 * @ppipe: output; receives the staged or newly set up base pipe
 * @mixer_mux: MDSS_MDP_MIXER_MUX_LEFT or _RIGHT mixer to service
 * @pipe_allocated: output; set true only when this call allocated the pipe
 *
 * If a pipe is already staged at the base stage of the requested mixer it is
 * returned as-is; otherwise a new overlay request sized to the framebuffer
 * is built and a pipe is set up for it.  Returns 0 on success or a negative
 * errno.
 */
static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
					struct mdss_mdp_pipe **ppipe,
					int mixer_mux, bool *pipe_allocated)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe;
	int ret = 0;
	struct mdp_overlay *req = NULL;

	*pipe_allocated = false;
	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl, mixer_mux,
		MDSS_MDP_STAGE_BASE, false);

	if (pipe == NULL) {
		struct fb_info *fbi = mfd->fbi;
		struct mdss_mdp_mixer *mixer;
		int bpp;
		bool rotate_180 = (fbi->var.rotate == FB_ROTATE_UD);
		struct mdss_data_type *mdata = mfd_to_mdata(mfd);
		/* split-LM when the fb is wider than one mixer can drive */
		bool split_lm = (fbi->var.xres > mdata->max_mixer_width ||
			is_split_lm(mfd));
		struct mdp_rect left_rect, right_rect;

		mixer = mdss_mdp_mixer_get(mdp5_data->ctl,
					MDSS_MDP_MIXER_MUX_LEFT);
		if (!mixer) {
			pr_err("unable to retrieve mixer\n");
			return -ENODEV;
		}

		req = kcalloc(1, sizeof(struct mdp_overlay), GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		/* source spans the whole fb line; width derived from stride */
		bpp = fbi->var.bits_per_pixel / 8;
		req->id = MSMFB_NEW_REQUEST;
		req->src.format = mfd->fb_imgType;
		req->src.height = fbi->var.yres;
		req->src.width = fbi->fix.line_length / bpp;

		left_rect.x = 0;
		left_rect.w = MIN(fbi->var.xres, mixer->width);
		left_rect.y = 0;
		left_rect.h = req->src.height;

		right_rect.x = mixer->width;
		right_rect.w = fbi->var.xres - mixer->width;
		right_rect.y = 0;
		right_rect.h = req->src.height;

		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
			if (req->src.width <= mixer->width) {
				pr_warn("right fb pipe not needed\n");
				ret = -EINVAL;
				goto done;
			}
			req->src_rect = req->dst_rect = right_rect;
			/*
			 * With 180-degree rotation on split-LM, the right
			 * mixer displays the left half of the source and
			 * vice versa.
			 */
			if (split_lm && rotate_180)
				req->src_rect = left_rect;
		} else {
			req->src_rect = req->dst_rect = left_rect;
			if (split_lm && rotate_180)
				req->src_rect = right_rect;
		}

		req->z_order = MDSS_MDP_STAGE_BASE;
		if (rotate_180)
			req->flags |= (MDP_FLIP_LR | MDP_FLIP_UD);

		pr_debug("allocating base pipe mux=%d\n", mixer_mux);

		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL,
			false);
		if (ret)
			goto done;

		*pipe_allocated = true;
	}
	pr_debug("ctl=%d pnum=%d\n", mdp5_data->ctl->num, pipe->num);

	*ppipe = pipe;

done:
	/* kfree(NULL) is a no-op on the already-staged path */
	kfree(req);
	return ret;
}
3348
/*
 * mdss_mdp_overlay_pan_display() - legacy fb pan_display path
 * @mfd: framebuffer device being panned
 *
 * Stages the framebuffer memory on the base pipe(s) (left mixer, and right
 * mixer for split-LM or wide panels) at the current x/y offset and kicks
 * off a frame.  Errors are logged; the function returns void per the fb
 * pan_display contract.
 */
static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_data *buf_l = NULL, *buf_r = NULL;
	struct mdss_mdp_pipe *l_pipe, *r_pipe, *pipe, *tmp;
	struct fb_info *fbi;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_data_type *mdata;
	u32 offset;
	int bpp, ret;
	bool l_pipe_allocated = false, r_pipe_allocated = false;

	if (!mfd || !mfd->mdp.private1)
		return;

	mdata = mfd_to_mdata(mfd);
	fbi = mfd->fbi;
	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl)
		return;

	/*
	 * Ignore writeback updates through pan_display as output
	 * buffer is not available.
	 */
	if (mfd->panel_info->type == WRITEBACK_PANEL) {
		pr_err_once("writeback update not supported through pan display\n");
		return;
	}

	/* no fb memory (or borderfill-only): just kick off what is staged */
	if (IS_ERR_OR_NULL(mfd->fbmem_buf) || fbi->fix.smem_len == 0 ||
		mdp5_data->borderfill_enable) {
		if (mdata->handoff_pending) {
			/*
			 * Move pipes to cleanup queue and avoid kickoff if
			 * pan display is called before handoff is completed.
			 */
			mutex_lock(&mdp5_data->list_lock);
			list_for_each_entry_safe(pipe, tmp,
				&mdp5_data->pipes_used, list) {
				list_move(&pipe->list,
					&mdp5_data->pipes_cleanup);
			}
			mutex_unlock(&mdp5_data->list_lock);
		}
		mfd->mdp.kickoff_fnc(mfd, NULL);
		return;
	}

	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
		return;

	/* allowed while off only for DCM entry on command-mode panels */
	if ((mdss_fb_is_power_off(mfd)) &&
		!((mfd->dcm_state == DCM_ENTER) &&
		(mfd->panel.type == MIPI_CMD_PANEL))) {
		mutex_unlock(&mdp5_data->ov_lock);
		return;
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	/* byte offset of the visible region inside the fb memory */
	bpp = fbi->var.bits_per_pixel / 8;
	offset = fbi->var.xoffset * bpp +
		 fbi->var.yoffset * fbi->fix.line_length;

	if (offset > fbi->fix.smem_len) {
		pr_err("invalid fb offset=%u total length=%u\n",
		       offset, fbi->fix.smem_len);
		goto clk_disable;
	}

	ret = mdss_mdp_overlay_get_fb_pipe(mfd, &l_pipe,
		MDSS_MDP_MIXER_MUX_LEFT, &l_pipe_allocated);
	if (ret) {
		pr_err("unable to allocate base pipe\n");
		goto iommu_disable;
	}

	if (mdss_mdp_pipe_map(l_pipe)) {
		pr_err("unable to map base pipe\n");
		goto pipe_release;
	}

	/*
	 * NOTE(review): if overlay_start fails here, the l_pipe reference
	 * taken just above is not dropped on the clk_disable path — looks
	 * like a pipe refcount leak; confirm against mdss_mdp_pipe_map
	 * semantics.
	 */
	ret = mdss_mdp_overlay_start(mfd);
	if (ret) {
		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
		goto clk_disable;
	}

	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("IOMMU attach failed\n");
		goto clk_disable;
	}

	buf_l = __mdp_overlay_buf_alloc(mfd, l_pipe);
	if (!buf_l) {
		pr_err("unable to allocate memory for fb buffer\n");
		mdss_mdp_pipe_unmap(l_pipe);
		goto pipe_release;
	}

	/* point the pipe at the fb memory directly; no detach on cleanup */
	buf_l->p[0].srcp_table = mfd->fb_table;
	buf_l->p[0].srcp_dma_buf = mfd->fbmem_buf;
	buf_l->p[0].len = 0;
	buf_l->p[0].addr = 0;
	buf_l->p[0].offset = offset;
	buf_l->p[0].skip_detach = true;
	buf_l->p[0].mapped = false;
	buf_l->num_planes = 1;

	mdss_mdp_pipe_unmap(l_pipe);

	if (fbi->var.xres > mdata->max_pipe_width || is_split_lm(mfd)) {
		/*
		 * TODO: Need to revisit the function for panels with width more
		 * than max_pipe_width and less than max_mixer_width.
		 */
		ret = mdss_mdp_overlay_get_fb_pipe(mfd, &r_pipe,
			MDSS_MDP_MIXER_MUX_RIGHT, &r_pipe_allocated);
		if (ret) {
			pr_err("unable to allocate right base pipe\n");
			goto pipe_release;
		}

		if (mdss_mdp_pipe_map(r_pipe)) {
			pr_err("unable to map right base pipe\n");
			goto pipe_release;
		}

		buf_r = __mdp_overlay_buf_alloc(mfd, r_pipe);
		if (!buf_r) {
			pr_err("unable to allocate memory for fb buffer\n");
			mdss_mdp_pipe_unmap(r_pipe);
			goto pipe_release;
		}

		/* right pipe shares the same plane as the left */
		buf_r->p[0] = buf_l->p[0];
		buf_r->num_planes = 1;

		mdss_mdp_pipe_unmap(r_pipe);
	}
	mutex_unlock(&mdp5_data->ov_lock);

	if ((fbi->var.activate & FB_ACTIVATE_VBL) ||
	    (fbi->var.activate & FB_ACTIVATE_FORCE))
		mfd->mdp.kickoff_fnc(mfd, NULL);

	mdss_iommu_ctrl(0);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	return;

pipe_release:
	if (r_pipe_allocated)
		mdss_mdp_overlay_release(mfd, r_pipe->ndx);
	if (buf_l)
		__mdp_overlay_buf_free(mfd, buf_l);
	if (l_pipe_allocated)
		mdss_mdp_overlay_release(mfd, l_pipe->ndx);
iommu_disable:
	mdss_iommu_ctrl(0);
clk_disable:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	mutex_unlock(&mdp5_data->ov_lock);
}
3514
3515static void remove_underrun_vsync_handler(struct work_struct *work)
3516{
3517 int rc;
3518 struct mdss_mdp_ctl *ctl =
3519 container_of(work, typeof(*ctl), remove_underrun_handler);
3520
3521 if (!ctl || !ctl->ops.remove_vsync_handler) {
3522 pr_err("ctl or vsync handler is NULL\n");
3523 return;
3524 }
3525
3526 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
3527 rc = ctl->ops.remove_vsync_handler(ctl,
3528 &ctl->recover_underrun_handler);
3529 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
3530}
3531
/*
 * Vsync callback used while recovering from an underrun: reset the ctl
 * hardware immediately, then defer removal of this handler to a workqueue
 * (removal cannot be done from the vsync/irq path itself).
 */
static void mdss_mdp_recover_underrun_handler(struct mdss_mdp_ctl *ctl,
					      ktime_t t)
{
	if (!ctl) {
		pr_err("ctl is NULL\n");
		return;
	}

	mdss_mdp_ctl_reset(ctl, true);
	schedule_work(&ctl->remove_underrun_handler);
}
3543
/* do nothing in case of deterministic frame rate control, only keep vsync on */
static void mdss_mdp_overlay_frc_handler(struct mdss_mdp_ctl *ctl,
					   ktime_t t)
{
	/* registering this handler keeps vsync irqs enabled; no work here */
	pr_debug("vsync on ctl%d vsync_cnt=%d\n", ctl->num, ctl->vsync_cnt);
}
3550
3551/* function is called in irq context should have minimum processing */
3552static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
3553 ktime_t t)
3554{
3555 struct msm_fb_data_type *mfd = NULL;
3556 struct mdss_overlay_private *mdp5_data = NULL;
3557
3558 if (!ctl) {
3559 pr_err("ctl is NULL\n");
3560 return;
3561 }
3562
3563 mfd = ctl->mfd;
3564 if (!mfd || !mfd->mdp.private1) {
3565 pr_warn("Invalid handle for vsync\n");
3566 return;
3567 }
3568
3569 mdp5_data = mfd_to_mdp5_data(mfd);
3570 if (!mdp5_data) {
3571 pr_err("mdp5_data is NULL\n");
3572 return;
3573 }
3574
3575 pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);
3576
3577 mdp5_data->vsync_time = t;
3578 sysfs_notify_dirent(mdp5_data->vsync_event_sd);
3579}
3580
3581/* function is called in irq context should have minimum processing */
3582static void mdss_mdp_overlay_handle_lineptr(struct mdss_mdp_ctl *ctl,
3583 ktime_t t)
3584{
3585 struct mdss_overlay_private *mdp5_data = NULL;
3586
3587 if (!ctl || !ctl->mfd) {
3588 pr_warn("Invalid handle for lineptr\n");
3589 return;
3590 }
3591
3592 mdp5_data = mfd_to_mdp5_data(ctl->mfd);
3593 if (!mdp5_data) {
3594 pr_err("mdp5_data is NULL\n");
3595 return;
3596 }
3597
3598 pr_debug("lineptr irq on fb%d play_cnt=%d\n",
3599 ctl->mfd->index, ctl->play_cnt);
3600
3601 mdp5_data->lineptr_time = t;
3602 sysfs_notify_dirent(mdp5_data->lineptr_event_sd);
3603}
3604
3605int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en)
3606{
3607 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3608 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
3609 int rc;
3610
3611 if (!ctl)
3612 return -ENODEV;
3613
3614 mutex_lock(&mdp5_data->ov_lock);
3615 if (!ctl->ops.add_vsync_handler || !ctl->ops.remove_vsync_handler) {
3616 rc = -EOPNOTSUPP;
3617 pr_err_once("fb%d vsync handlers are not registered\n",
3618 mfd->index);
3619 goto end;
3620 }
3621
3622 if (!ctl->panel_data->panel_info.cont_splash_enabled
3623 && (!mdss_mdp_ctl_is_power_on(ctl) ||
3624 mdss_panel_is_power_on_ulp(ctl->power_state))) {
3625 pr_debug("fb%d vsync pending first update en=%d, ctl power state:%d\n",
3626 mfd->index, en, ctl->power_state);
3627 rc = -EPERM;
3628 goto end;
3629 }
3630
3631 pr_debug("fb%d vsync en=%d\n", mfd->index, en);
3632
3633 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
3634 if (en)
3635 rc = ctl->ops.add_vsync_handler(ctl, &ctl->vsync_handler);
3636 else
3637 rc = ctl->ops.remove_vsync_handler(ctl, &ctl->vsync_handler);
3638 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
3639
3640end:
3641 mutex_unlock(&mdp5_data->ov_lock);
3642 return rc;
3643}
3644
3645static ssize_t dynamic_fps_sysfs_rda_dfps(struct device *dev,
3646 struct device_attribute *attr, char *buf)
3647{
3648 ssize_t ret;
3649 struct mdss_panel_data *pdata;
3650 struct fb_info *fbi = dev_get_drvdata(dev);
3651 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3652 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3653
3654 if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl))
3655 return 0;
3656
3657 pdata = dev_get_platdata(&mfd->pdev->dev);
3658 if (!pdata) {
3659 pr_err("no panel connected for fb%d\n", mfd->index);
3660 return -ENODEV;
3661 }
3662
3663 mutex_lock(&mdp5_data->dfps_lock);
3664 ret = snprintf(buf, PAGE_SIZE, "%d\n",
3665 pdata->panel_info.mipi.frame_rate);
3666 pr_debug("%s: '%d'\n", __func__,
3667 pdata->panel_info.mipi.frame_rate);
3668 mutex_unlock(&mdp5_data->dfps_lock);
3669
3670 return ret;
3671} /* dynamic_fps_sysfs_rda_dfps */
3672
3673static int calc_extra_blanking(struct mdss_panel_data *pdata, u32 new_fps)
3674{
3675 int add_porches, diff;
3676
3677 /* calculate extra: lines for vfp-method, pixels for hfp-method */
3678 diff = abs(pdata->panel_info.default_fps - new_fps);
3679 add_porches = mult_frac(pdata->panel_info.saved_total,
3680 diff, new_fps);
3681
3682 return add_porches;
3683}
3684
/*
 * cache_initial_timings() - snapshot the panel's pre-dfps fps and porches
 * @pdata: panel whose timings should be captured
 *
 * Runs only once (guarded by default_fps == 0).  Records the initial frame
 * rate and, depending on the dfps update mode, the vertical or horizontal
 * total and front porch so later dfps math keeps full precision.
 */
static void cache_initial_timings(struct mdss_panel_data *pdata)
{
	if (!pdata->panel_info.default_fps) {

		/*
		 * This value will change dynamically once the
		 * actual dfps update happen in hw.
		 */
		pdata->panel_info.current_fps =
			mdss_panel_get_framerate(&pdata->panel_info,
				FPS_RESOLUTION_DEFAULT);

		/*
		 * Keep the initial fps and porch values for this panel before
		 * any dfps update happen, this is to prevent losing precision
		 * in further calculations.
		 */
		pdata->panel_info.default_fps =
			mdss_panel_get_framerate(&pdata->panel_info,
				FPS_RESOLUTION_DEFAULT);

		if (pdata->panel_info.dfps_update ==
				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
			/* vfp method: save vertical total and front porch */
			pdata->panel_info.saved_total =
				mdss_panel_get_vtotal(&pdata->panel_info);
			pdata->panel_info.saved_fporch =
				pdata->panel_info.lcdc.v_front_porch;

		} else if (pdata->panel_info.dfps_update ==
				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
				pdata->panel_info.dfps_update ==
				DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
				pdata->panel_info.dfps_update ==
				DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
			/* hfp methods: save horizontal total and front porch */
			pdata->panel_info.saved_total =
				mdss_panel_get_htotal(&pdata->panel_info, true);
			pdata->panel_info.saved_fporch =
				pdata->panel_info.lcdc.h_front_porch;
		}
	}
}
3726
3727static inline void dfps_update_fps(struct mdss_panel_info *pinfo, u32 fps)
3728{
3729 if (pinfo->type == DTV_PANEL)
3730 pinfo->lcdc.frame_rate = fps;
3731 else
3732 pinfo->mipi.frame_rate = fps;
3733}
3734
/*
 * dfps_update_panel_params() - apply a dynamic-fps request to the panel info
 * @pdata: panel to update
 * @data: requested fps plus (for the multi-update modes) explicit h-porch
 *        and clock values
 *
 * Each dfps_update mode changes frame rate differently: by stretching the
 * vertical front porch, the horizontal front porch, taking porches/clock
 * straight from userspace, or recalculating the clock from the new fps.
 */
static void dfps_update_panel_params(struct mdss_panel_data *pdata,
	struct dynamic_fps_data *data)
{
	u32 new_fps = data->fps;

	/* Keep initial values before any dfps update */
	cache_initial_timings(pdata);

	if (pdata->panel_info.dfps_update ==
			DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
		int add_v_lines;

		/* calculate extra vfp lines */
		add_v_lines = calc_extra_blanking(pdata, new_fps);

		/* update panel info with new values */
		pdata->panel_info.lcdc.v_front_porch =
			pdata->panel_info.saved_fporch + add_v_lines;

		dfps_update_fps(&pdata->panel_info, new_fps);

		/* porch change shifts the prefetch window; recompute it */
		pdata->panel_info.prg_fet =
			mdss_mdp_get_prefetch_lines(&pdata->panel_info);

	} else if (pdata->panel_info.dfps_update ==
			DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP) {
		int add_h_pixels;

		/* calculate extra hfp pixels */
		add_h_pixels = calc_extra_blanking(pdata, new_fps);

		/* update panel info: shrink hfp to speed up, grow to slow */
		if (pdata->panel_info.default_fps > new_fps)
			pdata->panel_info.lcdc.h_front_porch =
				pdata->panel_info.saved_fporch + add_h_pixels;
		else
			pdata->panel_info.lcdc.h_front_porch =
				pdata->panel_info.saved_fporch - add_h_pixels;

		dfps_update_fps(&pdata->panel_info, new_fps);
	} else if (pdata->panel_info.dfps_update ==
			DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP) {

		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
			data->hfp, data->hbp, data->hpw,
			data->clk_rate, data->fps);

		/* take porches and pixel clock directly from the request */
		pdata->panel_info.lcdc.h_front_porch = data->hfp;
		pdata->panel_info.lcdc.h_back_porch = data->hbp;
		pdata->panel_info.lcdc.h_pulse_width = data->hpw;

		pdata->panel_info.clk_rate = data->clk_rate;
		/* DTV clock rate is supplied in kHz; convert to Hz */
		if (pdata->panel_info.type == DTV_PANEL)
			pdata->panel_info.clk_rate *= 1000;

		dfps_update_fps(&pdata->panel_info, new_fps);
	} else if (pdata->panel_info.dfps_update ==
			DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {

		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
			data->hfp, data->hbp, data->hpw,
			data->clk_rate, data->fps);

		/* porches from the request, clock recalculated from fps */
		pdata->panel_info.lcdc.h_front_porch = data->hfp;
		pdata->panel_info.lcdc.h_back_porch = data->hbp;
		pdata->panel_info.lcdc.h_pulse_width = data->hpw;

		pdata->panel_info.clk_rate = data->clk_rate;

		dfps_update_fps(&pdata->panel_info, new_fps);
		mdss_panel_update_clk_rate(&pdata->panel_info, new_fps);
	} else {
		/* default: just set the fps and derive the clock from it */
		dfps_update_fps(&pdata->panel_info, new_fps);
		mdss_panel_update_clk_rate(&pdata->panel_info, new_fps);
	}
}
3811
3812int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
3813 struct mdss_panel_data *pdata, struct dynamic_fps_data *dfps_data)
3814{
3815 struct fb_var_screeninfo *var = &mfd->fbi->var;
3816 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3817 u32 dfps = dfps_data->fps;
3818
3819 mutex_lock(&mdp5_data->dfps_lock);
3820
3821 pr_debug("new_fps:%d\n", dfps);
3822
3823 if (dfps < pdata->panel_info.min_fps) {
3824 pr_err("Unsupported FPS. min_fps = %d\n",
3825 pdata->panel_info.min_fps);
3826 mutex_unlock(&mdp5_data->dfps_lock);
3827 return -EINVAL;
3828 } else if (dfps > pdata->panel_info.max_fps) {
3829 pr_warn("Unsupported FPS. Configuring to max_fps = %d\n",
3830 pdata->panel_info.max_fps);
3831 dfps = pdata->panel_info.max_fps;
3832 dfps_data->fps = dfps;
3833 }
3834
3835 dfps_update_panel_params(pdata, dfps_data);
3836 if (pdata->next)
3837 dfps_update_panel_params(pdata->next, dfps_data);
3838
3839 /*
3840 * Update the panel info in the upstream
3841 * data, so any further call to get the screen
3842 * info has the updated timings.
3843 */
3844 mdss_panelinfo_to_fb_var(&pdata->panel_info, var);
3845
3846 MDSS_XLOG(dfps);
3847 mutex_unlock(&mdp5_data->dfps_lock);
3848
3849 return 0;
3850}
3851
3852
3853static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
3854 struct device_attribute *attr, const char *buf, size_t count)
3855{
3856 int panel_fps, rc = 0;
3857 struct mdss_panel_data *pdata;
3858 struct fb_info *fbi = dev_get_drvdata(dev);
3859 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3860 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3861 struct dynamic_fps_data data = {0};
3862
3863 if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
3864 pr_debug("panel is off\n");
3865 return count;
3866 }
3867
3868 pdata = dev_get_platdata(&mfd->pdev->dev);
3869 if (!pdata) {
3870 pr_err("no panel connected for fb%d\n", mfd->index);
3871 return -ENODEV;
3872 }
3873
3874 if (!pdata->panel_info.dynamic_fps) {
3875 pr_err_once("%s: Dynamic fps not enabled for this panel\n",
3876 __func__);
3877 return -EINVAL;
3878 }
3879
3880 if (pdata->panel_info.dfps_update ==
3881 DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
3882 pdata->panel_info.dfps_update ==
3883 DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
3884 if (sscanf(buf, "%u %u %u %u %u",
3885 &data.hfp, &data.hbp, &data.hpw,
3886 &data.clk_rate, &data.fps) != 5) {
3887 pr_err("could not read input\n");
3888 return -EINVAL;
3889 }
3890 } else {
3891 rc = kstrtoint(buf, 10, &data.fps);
3892 if (rc) {
3893 pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
3894 return rc;
3895 }
3896 }
3897
3898 panel_fps = mdss_panel_get_framerate(&pdata->panel_info,
3899 FPS_RESOLUTION_DEFAULT);
3900
3901 if (data.fps == panel_fps) {
3902 pr_debug("%s: FPS is already %d\n",
3903 __func__, data.fps);
3904 return count;
3905 }
3906
3907 if (data.hfp > DFPS_DATA_MAX_HFP || data.hbp > DFPS_DATA_MAX_HBP ||
3908 data.hpw > DFPS_DATA_MAX_HPW || data.fps > DFPS_DATA_MAX_FPS ||
3909 data.clk_rate > DFPS_DATA_MAX_CLK_RATE){
3910 pr_err("Data values out of bound.\n");
3911 return -EINVAL;
3912 }
3913
3914 rc = mdss_mdp_dfps_update_params(mfd, pdata, &data);
3915 if (rc) {
3916 pr_err("failed to set dfps params\n");
3917 return rc;
3918 }
3919
3920 return count;
3921} /* dynamic_fps_sysfs_wta_dfps */
3922
3923
/* sysfs node "dynamic_fps" (rw): read current fps / request a new one */
static DEVICE_ATTR(dynamic_fps, 0644, dynamic_fps_sysfs_rda_dfps,
	dynamic_fps_sysfs_wta_dfps);

/* attribute group registered on the fb device for dynamic fps control */
static struct attribute *dynamic_fps_fs_attrs[] = {
	&dev_attr_dynamic_fps.attr,
	NULL,
};
static struct attribute_group dynamic_fps_fs_attrs_group = {
	.attrs = dynamic_fps_fs_attrs,
};
3934
3935static ssize_t mdss_mdp_vsync_show_event(struct device *dev,
3936 struct device_attribute *attr, char *buf)
3937{
3938 struct fb_info *fbi = dev_get_drvdata(dev);
3939 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3940 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3941 u64 vsync_ticks;
3942 int ret;
3943
3944 if (!mdp5_data->ctl ||
3945 (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
3946 && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
3947 return -EAGAIN;
3948
3949 vsync_ticks = ktime_to_ns(mdp5_data->vsync_time);
3950
3951 pr_debug("fb%d vsync=%llu\n", mfd->index, vsync_ticks);
3952 ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
3953
3954 return ret;
3955}
3956
3957static ssize_t mdss_mdp_lineptr_show_event(struct device *dev,
3958 struct device_attribute *attr, char *buf)
3959{
3960 struct fb_info *fbi = dev_get_drvdata(dev);
3961 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3962 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3963 u64 lineptr_ticks;
3964 int ret;
3965
3966 if (!mdp5_data->ctl ||
3967 (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
3968 && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
3969 return -EPERM;
3970
3971 lineptr_ticks = ktime_to_ns(mdp5_data->lineptr_time);
3972
3973 pr_debug("fb%d lineptr=%llu\n", mfd->index, lineptr_ticks);
3974 ret = scnprintf(buf, PAGE_SIZE, "LINEPTR=%llu\n", lineptr_ticks);
3975
3976 return ret;
3977}
3978
3979static ssize_t mdss_mdp_lineptr_show_value(struct device *dev,
3980 struct device_attribute *attr, char *buf)
3981{
3982 struct fb_info *fbi = dev_get_drvdata(dev);
3983 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3984 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3985 int ret, lineptr_val;
3986
3987 if (!mdp5_data->ctl ||
3988 (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
3989 && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
3990 return -EPERM;
3991
3992 lineptr_val = mfd->panel_info->te.wr_ptr_irq;
3993
3994 ret = scnprintf(buf, PAGE_SIZE, "%d\n", lineptr_val);
3995
3996 return ret;
3997}
3998
3999static ssize_t mdss_mdp_lineptr_set_value(struct device *dev,
4000 struct device_attribute *attr, const char *buf, size_t count)
4001{
4002 struct fb_info *fbi = dev_get_drvdata(dev);
4003 struct msm_fb_data_type *mfd = fbi->par;
4004 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4005 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
4006 int ret, lineptr_value;
4007
4008 if (!ctl || (!ctl->panel_data->panel_info.cont_splash_enabled
4009 && !mdss_mdp_ctl_is_power_on(ctl)))
4010 return -EAGAIN;
4011
4012 ret = kstrtoint(buf, 10, &lineptr_value);
4013 if (ret || (lineptr_value < 0)
4014 || (lineptr_value > mfd->panel_info->yres)) {
4015 pr_err("Invalid input for lineptr\n");
4016 return -EINVAL;
4017 }
4018
4019 if (!mdss_mdp_is_lineptr_supported(ctl)) {
4020 pr_err("lineptr not supported\n");
4021 return -ENOTSUPP;
4022 }
4023
4024 mutex_lock(&mdp5_data->ov_lock);
4025 mfd->panel_info->te.wr_ptr_irq = lineptr_value;
4026 if (ctl && ctl->ops.update_lineptr)
4027 ctl->ops.update_lineptr(ctl, true);
4028 mutex_unlock(&mdp5_data->ov_lock);
4029
4030 return count;
4031}
4032
4033static ssize_t mdss_mdp_bl_show_event(struct device *dev,
4034 struct device_attribute *attr, char *buf)
4035{
4036 struct fb_info *fbi = dev_get_drvdata(dev);
4037 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4038 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4039 int ret;
4040
4041 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->bl_events);
4042 return ret;
4043}
4044
4045static ssize_t mdss_mdp_hist_show_event(struct device *dev,
4046 struct device_attribute *attr, char *buf)
4047{
4048 struct fb_info *fbi = dev_get_drvdata(dev);
4049 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4050 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4051 int ret;
4052
4053 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->hist_events);
4054 return ret;
4055}
4056
4057static ssize_t mdss_mdp_ad_show_event(struct device *dev,
4058 struct device_attribute *attr, char *buf)
4059{
4060 struct fb_info *fbi = dev_get_drvdata(dev);
4061 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4062 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4063 int ret;
4064
4065 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_events);
4066 return ret;
4067}
4068
4069static ssize_t mdss_mdp_ad_bl_show_event(struct device *dev,
4070 struct device_attribute *attr, char *buf)
4071{
4072 struct fb_info *fbi = dev_get_drvdata(dev);
4073 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4074 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4075 int ret;
4076
4077 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_bl_events);
4078 return ret;
4079}
4080
4081static inline int mdss_mdp_ad_is_supported(struct msm_fb_data_type *mfd)
4082{
4083 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
4084 struct mdss_mdp_mixer *mixer;
4085
4086 if (!ctl) {
4087 pr_debug("there is no ctl attached to fb\n");
4088 return 0;
4089 }
4090
4091 mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
4092 if (mixer && (mixer->num > ctl->mdata->nad_cfgs)) {
4093 if (!mixer)
4094 pr_warn("there is no mixer attached to fb\n");
4095 else
4096 pr_debug("mixer attached (%d) doesn't support ad\n",
4097 mixer->num);
4098 return 0;
4099 }
4100
4101 mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
4102 if (mixer && (mixer->num > ctl->mdata->nad_cfgs))
4103 return 0;
4104
4105 return 1;
4106}
4107
4108static ssize_t mdss_mdp_ad_show(struct device *dev,
4109 struct device_attribute *attr, char *buf)
4110{
4111 struct fb_info *fbi = dev_get_drvdata(dev);
4112 struct msm_fb_data_type *mfd = fbi->par;
4113 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4114 int ret, state;
4115
4116 state = mdss_mdp_ad_is_supported(mfd) ? mdp5_data->ad_state : -1;
4117
4118 ret = scnprintf(buf, PAGE_SIZE, "%d", state);
4119
4120 return ret;
4121}
4122
4123static ssize_t mdss_mdp_ad_store(struct device *dev,
4124 struct device_attribute *attr, const char *buf, size_t count)
4125{
4126 struct fb_info *fbi = dev_get_drvdata(dev);
4127 struct msm_fb_data_type *mfd = fbi->par;
4128 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4129 int ret, ad;
4130
4131 ret = kstrtoint(buf, 10, &ad);
4132 if (ret) {
4133 pr_err("Invalid input for ad\n");
4134 return -EINVAL;
4135 }
4136
4137 mdp5_data->ad_state = ad;
4138 sysfs_notify(&dev->kobj, NULL, "ad");
4139
4140 return count;
4141}
4142
4143static ssize_t mdss_mdp_dyn_pu_show(struct device *dev,
4144 struct device_attribute *attr, char *buf)
4145{
4146 struct fb_info *fbi = dev_get_drvdata(dev);
4147 struct msm_fb_data_type *mfd = fbi->par;
4148 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4149 int ret, state;
4150
4151 state = (mdp5_data->dyn_pu_state >= 0) ? mdp5_data->dyn_pu_state : -1;
4152
4153 ret = scnprintf(buf, PAGE_SIZE, "%d", state);
4154
4155 return ret;
4156}
4157
4158static ssize_t mdss_mdp_dyn_pu_store(struct device *dev,
4159 struct device_attribute *attr, const char *buf, size_t count)
4160{
4161 struct fb_info *fbi = dev_get_drvdata(dev);
4162 struct msm_fb_data_type *mfd = fbi->par;
4163 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4164 int ret, dyn_pu;
4165
4166 ret = kstrtoint(buf, 10, &dyn_pu);
4167 if (ret) {
4168 pr_err("Invalid input for partial update: ret = %d\n", ret);
4169 return ret;
4170 }
4171
4172 mdp5_data->dyn_pu_state = dyn_pu;
4173 sysfs_notify(&dev->kobj, NULL, "dyn_pu");
4174
4175 return count;
4176}
4177static ssize_t mdss_mdp_cmd_autorefresh_show(struct device *dev,
4178 struct device_attribute *attr, char *buf)
4179{
4180 ssize_t ret = 0;
4181 struct fb_info *fbi = dev_get_drvdata(dev);
4182 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4183 struct mdss_mdp_ctl *ctl;
4184
4185 if (!mfd) {
4186 pr_err("Invalid mfd structure\n");
4187 return -EINVAL;
4188 }
4189
4190 ctl = mfd_to_ctl(mfd);
4191 if (!ctl) {
4192 pr_err("Invalid ctl structure\n");
4193 return -EINVAL;
4194 }
4195
4196
4197 if (mfd->panel_info->type != MIPI_CMD_PANEL) {
4198 pr_err("Panel doesn't support autorefresh\n");
4199 ret = -EINVAL;
4200 } else {
4201 ret = snprintf(buf, PAGE_SIZE, "%d\n",
4202 mdss_mdp_ctl_cmd_get_autorefresh(ctl));
4203 }
4204 return ret;
4205}
4206
/*
 * sysfs "msm_cmd_autorefresh_en" write: program panel autorefresh with
 * the requested frame count (0 disables). Only valid for MIPI
 * command-mode panels. On success the sync-point fence thresholds are
 * adjusted to match the autorefresh state.
 */
static ssize_t mdss_mdp_cmd_autorefresh_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int frame_cnt, rc;
	struct fb_info *fbi = dev_get_drvdata(dev);
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
	struct mdss_mdp_ctl *ctl;

	if (!mfd) {
		pr_err("Invalid mfd structure\n");
		rc = -EINVAL;
		return rc;
	}

	ctl = mfd_to_ctl(mfd);
	if (!ctl) {
		pr_err("Invalid ctl structure\n");
		rc = -EINVAL;
		return rc;
	}

	if (mfd->panel_info->type != MIPI_CMD_PANEL) {
		pr_err("Panel doesn't support autorefresh\n");
		rc = -EINVAL;
		return rc;
	}

	rc = kstrtoint(buf, 10, &frame_cnt);
	if (rc) {
		pr_err("kstrtoint failed. rc=%d\n", rc);
		return rc;
	}

	/* program the hardware first; thresholds change only on success */
	rc = mdss_mdp_ctl_cmd_set_autorefresh(ctl, frame_cnt);
	if (rc) {
		pr_err("cmd_set_autorefresh failed, rc=%d, frame_cnt=%d\n",
				rc, frame_cnt);
		return rc;
	}

	if (frame_cnt) {
		/* enable/reconfig autorefresh */
		mfd->mdp_sync_pt_data.threshold = 2;
		mfd->mdp_sync_pt_data.retire_threshold = 0;
	} else {
		/* disable autorefresh */
		mfd->mdp_sync_pt_data.threshold = 1;
		mfd->mdp_sync_pt_data.retire_threshold = 1;
	}

	pr_debug("setting cmd autorefresh to cnt=%d\n", frame_cnt);

	return len;
}
4261
4262
4263/* Print the last CRC Value read for batch mode */
4264static ssize_t mdss_mdp_misr_show(struct device *dev,
4265 struct device_attribute *attr, char *buf)
4266{
4267 ssize_t ret = 0;
4268 struct fb_info *fbi = dev_get_drvdata(dev);
4269 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4270 struct mdss_mdp_ctl *ctl;
4271
4272 if (!mfd) {
4273 pr_err("Invalid mfd structure\n");
4274 return -EINVAL;
4275 }
4276
4277 ctl = mfd_to_ctl(mfd);
4278 if (!ctl) {
4279 pr_err("Invalid ctl structure\n");
4280 return -EINVAL;
4281 }
4282
4283 ret = mdss_dump_misr_data(&buf, PAGE_SIZE);
4284
4285 return ret;
4286}
4287
4288/*
4289 * Enable crc batch mode. By enabling this mode through sysfs
4290 * driver will keep collecting the misr in ftrace during interrupts,
4291 * until disabled.
4292 */
4293static ssize_t mdss_mdp_misr_store(struct device *dev,
4294 struct device_attribute *attr, const char *buf, size_t len)
4295{
4296 int enable_misr, rc;
4297 struct fb_info *fbi = dev_get_drvdata(dev);
4298 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4299 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4300 struct mdss_mdp_ctl *ctl;
4301 struct mdp_misr req, sreq;
4302
4303 if (!mfd) {
4304 pr_err("Invalid mfd structure\n");
4305 rc = -EINVAL;
4306 return rc;
4307 }
4308
4309 ctl = mfd_to_ctl(mfd);
4310 if (!ctl) {
4311 pr_err("Invalid ctl structure\n");
4312 rc = -EINVAL;
4313 return rc;
4314 }
4315
4316 rc = kstrtoint(buf, 10, &enable_misr);
4317 if (rc) {
4318 pr_err("kstrtoint failed. rc=%d\n", rc);
4319 return rc;
4320 }
4321
4322 req.block_id = DISPLAY_MISR_MAX;
4323 sreq.block_id = DISPLAY_MISR_MAX;
4324
4325 pr_debug("intf_type:%d enable:%d\n", ctl->intf_type, enable_misr);
4326 if (ctl->intf_type == MDSS_INTF_DSI) {
4327
4328 req.block_id = DISPLAY_MISR_DSI0;
4329 req.crc_op_mode = MISR_OP_BM;
4330 req.frame_count = 1;
4331 if (is_panel_split(mfd)) {
4332
4333 sreq.block_id = DISPLAY_MISR_DSI1;
4334 sreq.crc_op_mode = MISR_OP_BM;
4335 sreq.frame_count = 1;
4336 }
4337 } else if (ctl->intf_type == MDSS_INTF_HDMI) {
4338
4339 req.block_id = DISPLAY_MISR_HDMI;
4340 req.crc_op_mode = MISR_OP_BM;
4341 req.frame_count = 1;
4342 } else {
4343 pr_err("misr not supported fo this fb:%d\n", mfd->index);
4344 rc = -ENODEV;
4345 return rc;
4346 }
4347
4348 if (enable_misr) {
4349 mdss_misr_set(mdata, &req, ctl);
4350
4351 if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
4352 mdss_misr_set(mdata, &sreq, ctl);
4353
4354 } else {
4355 mdss_misr_disable(mdata, &req, ctl);
4356
4357 if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
4358 mdss_misr_disable(mdata, &sreq, ctl);
4359 }
4360
4361 pr_debug("misr %s\n", enable_misr ? "enabled" : "disabled");
4362
4363 return len;
4364}
4365
/* sysfs attributes exposed per overlay fb device */
static DEVICE_ATTR(msm_misr_en, 0644,
	mdss_mdp_misr_show, mdss_mdp_misr_store);
static DEVICE_ATTR(msm_cmd_autorefresh_en, 0644,
	mdss_mdp_cmd_autorefresh_show, mdss_mdp_cmd_autorefresh_store);
static DEVICE_ATTR(vsync_event, 0444, mdss_mdp_vsync_show_event, NULL);
static DEVICE_ATTR(lineptr_event, 0444, mdss_mdp_lineptr_show_event, NULL);
static DEVICE_ATTR(lineptr_value, 0664,
	mdss_mdp_lineptr_show_value, mdss_mdp_lineptr_set_value);
static DEVICE_ATTR(ad, 0664, mdss_mdp_ad_show,
	mdss_mdp_ad_store);
static DEVICE_ATTR(dyn_pu, 0664, mdss_mdp_dyn_pu_show,
	mdss_mdp_dyn_pu_store);
static DEVICE_ATTR(hist_event, 0444, mdss_mdp_hist_show_event, NULL);
static DEVICE_ATTR(bl_event, 0444, mdss_mdp_bl_show_event, NULL);
static DEVICE_ATTR(ad_event, 0444, mdss_mdp_ad_show_event, NULL);
static DEVICE_ATTR(ad_bl_event, 0444, mdss_mdp_ad_bl_show_event, NULL);

/* registered as a group in one shot during overlay init */
static struct attribute *mdp_overlay_sysfs_attrs[] = {
	&dev_attr_vsync_event.attr,
	&dev_attr_lineptr_event.attr,
	&dev_attr_lineptr_value.attr,
	&dev_attr_ad.attr,
	&dev_attr_dyn_pu.attr,
	&dev_attr_msm_misr_en.attr,
	&dev_attr_msm_cmd_autorefresh_en.attr,
	&dev_attr_hist_event.attr,
	&dev_attr_bl_event.attr,
	&dev_attr_ad_event.attr,
	&dev_attr_ad_bl_event.attr,
	NULL,
};

static struct attribute_group mdp_overlay_sysfs_group = {
	.attrs = mdp_overlay_sysfs_attrs,
};
4401
4402static void mdss_mdp_hw_cursor_setpos(struct mdss_mdp_mixer *mixer,
4403 struct mdss_rect *roi, u32 start_x, u32 start_y)
4404{
4405 int roi_xy = (roi->y << 16) | roi->x;
4406 int start_xy = (start_y << 16) | start_x;
4407 int roi_size = (roi->h << 16) | roi->w;
4408
4409 if (!mixer) {
4410 pr_err("mixer not available\n");
4411 return;
4412 }
4413 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_XY, roi_xy);
4414 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_START_XY, start_xy);
4415 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
4416}
4417
/*
 * Program the hardware cursor image on the given mixer: image/ROI size,
 * stride, buffer base address and the blend configuration (constant
 * alpha vs per-pixel alpha, plus optional background-color transparency
 * matching).
 */
static void mdss_mdp_hw_cursor_setimage(struct mdss_mdp_mixer *mixer,
	struct fb_cursor *cursor, u32 cursor_addr, struct mdss_rect *roi)
{
	int calpha_en, transp_en, alpha, size;
	struct fb_image *img = &cursor->image;
	u32 blendcfg;
	int roi_size = 0;

	if (!mixer) {
		pr_err("mixer not available\n");
		return;
	}

	/* all-ones bg_color disables transparency matching */
	if (img->bg_color == 0xffffffff)
		transp_en = 0;
	else
		transp_en = 1;

	alpha = (img->fg_color & 0xff000000) >> 24;

	if (alpha)
		calpha_en = 0x0; /* xrgb */
	else
		calpha_en = 0x2; /* argb */

	/* cursor format is 32bpp, hence stride = width * 4 bytes */
	roi_size = (roi->h << 16) | roi->w;
	size = (img->height << 16) | img->width;
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_STRIDE,
				img->width * 4);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
				cursor_addr);
	/* preserve blend config but update alpha/transparency selections */
	blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
	blendcfg &= ~0x1;
	blendcfg |= (transp_en << 3) | (calpha_en << 1);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
				blendcfg);
	if (calpha_en)
		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM,
				alpha);

	/* transparency match window: low == high == bg_color components */
	if (transp_en) {
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0,
				((img->bg_color & 0xff00) << 8) |
				(img->bg_color & 0xff));
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1,
				((img->bg_color & 0xff0000) >> 16));
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0,
				((img->bg_color & 0xff00) << 8) |
				(img->bg_color & 0xff));
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1,
				((img->bg_color & 0xff0000) >> 16));
	}
}
4477
4478static void mdss_mdp_hw_cursor_blend_config(struct mdss_mdp_mixer *mixer,
4479 struct fb_cursor *cursor)
4480{
4481 u32 blendcfg;
4482
4483 if (!mixer) {
4484 pr_err("mixer not availbale\n");
4485 return;
4486 }
4487
4488 blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
4489 if (!cursor->enable != !(blendcfg & 0x1)) {
4490 if (cursor->enable) {
4491 pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
4492 blendcfg |= 0x1;
4493 } else {
4494 pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
4495 blendcfg &= ~0x1;
4496 }
4497
4498 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
4499 blendcfg);
4500 mixer->cursor_enabled = cursor->enable;
4501 mixer->params_changed++;
4502 }
4503
4504}
4505
4506static void mdss_mdp_set_rect(struct mdp_rect *rect, u16 x, u16 y, u16 w,
4507 u16 h)
4508{
4509 rect->x = x;
4510 rect->y = y;
4511 rect->w = w;
4512 rect->h = h;
4513}
4514
4515static void mdss_mdp_curor_pipe_cleanup(struct msm_fb_data_type *mfd,
4516 int cursor_pipe)
4517{
4518 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4519
4520 if (mdp5_data->cursor_ndx[cursor_pipe] != MSMFB_NEW_REQUEST) {
4521 mdss_mdp_overlay_release(mfd,
4522 mdp5_data->cursor_ndx[cursor_pipe]);
4523 mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
4524 }
4525}
4526
4527int mdss_mdp_cursor_flush(struct msm_fb_data_type *mfd,
4528 struct mdss_mdp_pipe *pipe, int cursor_pipe)
4529{
4530 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4531 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
4532 struct mdss_mdp_ctl *sctl = NULL;
4533 u32 flush_bits = BIT(22 + pipe->num - MDSS_MDP_SSPP_CURSOR0);
4534
4535 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
4536
4537 mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
4538 MDSS_XLOG(ctl->intf_num, flush_bits);
4539 if ((!ctl->split_flush_en) && pipe->mixer_right) {
4540 sctl = mdss_mdp_get_split_ctl(ctl);
4541 if (!sctl) {
4542 pr_err("not able to get the other ctl\n");
4543 return -ENODEV;
4544 }
4545 mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
4546 MDSS_XLOG(sctl->intf_num, flush_bits);
4547 }
4548
4549 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
4550
4551 return 0;
4552}
4553
/*
 * Stage one cursor SSPP pipe (left or right slot) with the given
 * overlay request, point it at the shared cursor buffer and flush the
 * ctl. When a fresh request fails, the pipe is released again in the
 * "done" cleanup path.
 */
static int mdss_mdp_cursor_pipe_setup(struct msm_fb_data_type *mfd,
		struct mdp_overlay *req, int cursor_pipe) {
	struct mdss_mdp_pipe *pipe;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int ret = 0;
	u32 cursor_addr;
	struct mdss_mdp_data *buf = NULL;

	/* reuse the previously assigned pipe ndx, or request a new one */
	req->id = mdp5_data->cursor_ndx[cursor_pipe];
	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
	if (ret) {
		pr_err("cursor pipe setup failed, cursor_pipe:%d, ret:%d\n",
			cursor_pipe, ret);
		mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
		return ret;
	}

	pr_debug("req id:%d cursor_pipe:%d pnum:%d\n",
		req->id, cursor_pipe, pipe->ndx);

	/* without an attached IOMMU the physical address is used directly */
	if (mdata->mdss_util->iommu_attached()) {
		cursor_addr = mfd->cursor_buf_iova;
	} else {
		if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
			pr_err("can't access phy mem >4GB w/o iommu\n");
			ret = -ERANGE;
			goto done;
		}
		cursor_addr = mfd->cursor_buf_phys;
	}

	buf = __mdp_overlay_buf_alloc(mfd, pipe);
	if (!buf) {
		pr_err("unable to allocate memory for cursor buffer\n");
		ret = -ENOMEM;
		goto done;
	}
	mdp5_data->cursor_ndx[cursor_pipe] = pipe->ndx;
	buf->p[0].addr = cursor_addr;
	buf->p[0].len = mdss_mdp_get_cursor_frame_size(mdata);
	buf->num_planes = 1;

	buf->state = MDP_BUF_STATE_ACTIVE;
	/* solid-fill cursors carry no pixel data, so queue a NULL buffer */
	if (!(req->flags & MDP_SOLID_FILL))
		ret = mdss_mdp_pipe_queue_data(pipe, buf);
	else
		ret = mdss_mdp_pipe_queue_data(pipe, NULL);

	if (ret) {
		pr_err("cursor pipe queue data failed in async mode\n");
		return ret;
	}

	ret = mdss_mdp_cursor_flush(mfd, pipe, cursor_pipe);
done:
	/* only release the pipe if this request never got a slot assigned */
	if (ret && mdp5_data->cursor_ndx[cursor_pipe] == MSMFB_NEW_REQUEST)
		mdss_mdp_overlay_release(mfd, pipe->ndx);

	return ret;
}
4615
/*
 * Update the hardware cursor using dedicated cursor SSPP pipes.
 * Validates the cursor image and position, lazily allocates the shared
 * cursor buffer on the first FB_CUR_SETIMAGE, builds an overlay request
 * and stages it on the left and/or right cursor pipe depending on where
 * the cursor falls relative to the left layer-mixer boundary.
 *
 * Returns 0 on success or a negative errno; on error both cursor pipes
 * are cleaned up.
 */
static int mdss_mdp_hw_cursor_pipe_update(struct msm_fb_data_type *mfd,
		struct fb_cursor *cursor)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_mixer *mixer;
	struct fb_image *img = &cursor->image;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdp_overlay *req = NULL;
	struct mdss_rect roi;
	int ret = 0;
	struct fb_var_screeninfo *var = &mfd->fbi->var;
	u32 xres = var->xres;
	u32 yres = var->yres;
	u32 start_x = img->dx;
	u32 start_y = img->dy;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	struct platform_device *pdev = mfd->pdev;
	u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);

	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
	if (ret)
		return ret;

	if (mdss_fb_is_power_off(mfd)) {
		ret = -EPERM;
		goto done;
	}

	/* cursor disable: tear down both cursor pipes and finish */
	if (!cursor->enable) {
		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
		goto done;
	}

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
	if (!mixer) {
		ret = -ENODEV;
		goto done;
	}

	/* lazily allocate the shared cursor buffer on first image download */
	if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
		ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
			cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
			&mfd->cursor_buf_iova, &mfd->cursor_buf,
			GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
		if (ret) {
			pr_err("can't allocate cursor buffer rc:%d\n", ret);
			goto done;
		}

		mixer->cursor_hotx = 0;
		mixer->cursor_hoty = 0;
	}

	pr_debug("mixer=%d enable=%x set=%x\n", mixer->num, cursor->enable,
			cursor->set);

	/* hotspot must lie inside the cursor image */
	if (cursor->set & FB_CUR_SETHOT) {
		if ((cursor->hot.x < img->width) &&
			(cursor->hot.y < img->height)) {
			mixer->cursor_hotx = cursor->hot.x;
			mixer->cursor_hoty = cursor->hot.y;
			/* Update cursor position */
			cursor->set |= FB_CUR_SETPOS;
		} else {
			pr_err("Invalid cursor hotspot coordinates\n");
			ret = -EINVAL;
			goto done;
		}
	}

	/* shift the start by the hotspot; clip the ROI at the screen edge */
	memset(&roi, 0, sizeof(struct mdss_rect));
	if (start_x > mixer->cursor_hotx) {
		start_x -= mixer->cursor_hotx;
	} else {
		roi.x = mixer->cursor_hotx - start_x;
		start_x = 0;
	}
	if (start_y > mixer->cursor_hoty) {
		start_y -= mixer->cursor_hoty;
	} else {
		roi.y = mixer->cursor_hoty - start_y;
		start_y = 0;
	}

	if ((img->width > mdata->max_cursor_size) ||
		(img->height > mdata->max_cursor_size) ||
		(img->depth != 32) || (start_x >= xres) ||
		(start_y >= yres)) {
		pr_err("Invalid cursor image coordinates\n");
		ret = -EINVAL;
		goto done;
	}

	roi.w = min(xres - start_x, img->width - roi.x);
	roi.h = min(yres - start_y, img->height - roi.y);

	if ((roi.w > mdata->max_cursor_size) ||
		(roi.h > mdata->max_cursor_size)) {
		pr_err("Invalid cursor ROI size\n");
		ret = -EINVAL;
		goto done;
	}

	req = kcalloc(1, sizeof(struct mdp_overlay), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto done;
	}

	req->pipe_type = PIPE_TYPE_CURSOR;
	req->z_order = HW_CURSOR_STAGE(mdata);

	req->src.width = img->width;
	req->src.height = img->height;
	req->src.format = mfd->fb_imgType;

	mdss_mdp_set_rect(&req->src_rect, roi.x, roi.y, roi.w, roi.h);
	mdss_mdp_set_rect(&req->dst_rect, start_x, start_y, roi.w, roi.h);

	req->bg_color = img->bg_color;
	req->alpha = (img->fg_color >> ((32 - var->transp.offset) - 8)) & 0xff;
	if (req->alpha)
		req->blend_op = BLEND_OP_PREMULTIPLIED;
	else
		req->blend_op = BLEND_OP_COVERAGE;
	req->transp_mask = img->bg_color & ~(0xff << var->transp.offset);

	if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
		/*
		 * NOTE(review): copy_from_user returns the number of
		 * bytes NOT copied (positive), which is propagated here
		 * as the error code; -EFAULT would be more conventional.
		 */
		ret = copy_from_user(mfd->cursor_buf, img->data,
				     img->width * img->height * 4);
		if (ret) {
			pr_err("copy_from_user error. rc=%d\n", ret);
			goto done;
		}

		mixer->cursor_hotx = 0;
		mixer->cursor_hoty = 0;
	}

	/*
	 * When source split is enabled, only CURSOR_PIPE_LEFT is used,
	 * with both mixers of the pipe staged all the time.
	 * When source split is disabled, 2 pipes are staged, with one
	 * pipe containing the actual data and another one a transparent
	 * solid fill when the data falls only in left or right dsi.
	 * Both are done to support async cursor functionality.
	 */
	if (mdata->has_src_split || (!is_split_lm(mfd))
			|| (mdata->ncursor_pipes == 1)) {
		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
	} else if ((start_x + roi.w) <= left_lm_w) {
		/* entirely on the left LM: solid fill on the right pipe */
		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
		if (ret)
			goto done;
		req->bg_color = 0;
		req->flags |= MDP_SOLID_FILL;
		req->dst_rect.x = left_lm_w;
		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
	} else if (start_x >= left_lm_w) {
		/* entirely on the right LM: solid fill on the left pipe */
		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
		if (ret)
			goto done;
		req->bg_color = 0;
		req->flags |= MDP_SOLID_FILL;
		req->dst_rect.x = 0;
		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
	} else if ((start_x <= left_lm_w) && ((start_x + roi.w) >= left_lm_w)) {
		/* straddles the LM boundary: split src/dst between pipes */
		mdss_mdp_set_rect(&req->dst_rect, start_x, start_y,
				(left_lm_w - start_x), roi.h);
		mdss_mdp_set_rect(&req->src_rect, 0, 0, (left_lm_w -
				start_x), roi.h);
		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
		if (ret)
			goto done;

		mdss_mdp_set_rect(&req->dst_rect, left_lm_w, start_y, ((start_x
				+ roi.w) - left_lm_w), roi.h);
		mdss_mdp_set_rect(&req->src_rect, (left_lm_w - start_x), 0,
				(roi.w - (left_lm_w - start_x)), roi.h);
		ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
	} else {
		pr_err("Invalid case for cursor pipe setup\n");
		ret = -EINVAL;
	}

done:
	if (ret) {
		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
		mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
	}

	kfree(req);
	mutex_unlock(&mdp5_data->ov_lock);
	return ret;
}
4812
/*
 * Update the legacy layer-mixer hardware cursor (no cursor SSPP).
 * Validates the image/hotspot, lazily allocates the cursor buffer,
 * downloads new image data when requested and programs position/blend
 * on the left and (for split-LM panels) right mixers, splitting the ROI
 * when the cursor straddles the mixer boundary.
 *
 * Returns 0 on success or a negative errno.
 */
static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
				     struct fb_cursor *cursor)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_mixer *mixer_left = NULL;
	struct mdss_mdp_mixer *mixer_right = NULL;
	struct fb_image *img = &cursor->image;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct fbcurpos cursor_hot;
	struct mdss_rect roi;
	int ret = 0;
	u32 xres = mfd->fbi->var.xres;
	u32 yres = mfd->fbi->var.yres;
	u32 start_x = img->dx;
	u32 start_y = img->dy;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	struct platform_device *pdev = mfd->pdev;
	u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);

	mixer_left = mdss_mdp_mixer_get(mdp5_data->ctl,
			MDSS_MDP_MIXER_MUX_DEFAULT);
	if (!mixer_left)
		return -ENODEV;
	/* split-LM panels also need the right mixer programmed */
	if (is_split_lm(mfd)) {
		mixer_right = mdss_mdp_mixer_get(mdp5_data->ctl,
				MDSS_MDP_MIXER_MUX_RIGHT);
		if (!mixer_right)
			return -ENODEV;
	}

	/* lazily allocate the shared cursor buffer on first image download */
	if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
		ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
			cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
			&mfd->cursor_buf_iova, &mfd->cursor_buf,
			GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
		if (ret) {
			pr_err("can't allocate cursor buffer rc:%d\n", ret);
			return ret;
		}
	}

	if ((img->width > mdata->max_cursor_size) ||
		(img->height > mdata->max_cursor_size) ||
		(img->depth != 32) || (start_x >= xres) || (start_y >= yres))
		return -EINVAL;

	pr_debug("enable=%x set=%x\n", cursor->enable, cursor->set);

	/* hotspot must lie inside the cursor image */
	memset(&cursor_hot, 0, sizeof(struct fbcurpos));
	memset(&roi, 0, sizeof(struct mdss_rect));
	if (cursor->set & FB_CUR_SETHOT) {
		if ((cursor->hot.x < img->width) &&
			(cursor->hot.y < img->height)) {
			cursor_hot.x = cursor->hot.x;
			cursor_hot.y = cursor->hot.y;
			/* Update cursor position */
			cursor->set |= FB_CUR_SETPOS;
		} else {
			pr_err("Invalid cursor hotspot coordinates\n");
			return -EINVAL;
		}
	}

	/* shift the start by the hotspot; clip the ROI at the screen edge */
	if (start_x > cursor_hot.x) {
		start_x -= cursor_hot.x;
	} else {
		roi.x = cursor_hot.x - start_x;
		start_x = 0;
	}
	if (start_y > cursor_hot.y) {
		start_y -= cursor_hot.y;
	} else {
		roi.y = cursor_hot.y - start_y;
		start_y = 0;
	}

	roi.w = min(xres - start_x, img->width - roi.x);
	roi.h = min(yres - start_y, img->height - roi.y);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
		u32 cursor_addr;

		/*
		 * NOTE(review): copy_from_user returns bytes NOT copied
		 * (positive) and that value is returned as the error code;
		 * -EFAULT would be more conventional.
		 */
		ret = copy_from_user(mfd->cursor_buf, img->data,
				     img->width * img->height * 4);
		if (ret) {
			pr_err("copy_from_user error. rc=%d\n", ret);
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
			return ret;
		}

		/* without an attached IOMMU the physical address is used */
		if (mdata->mdss_util->iommu_attached()) {
			cursor_addr = mfd->cursor_buf_iova;
		} else {
			if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
				pr_err("can't access phy mem >4GB w/o iommu\n");
				mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
				return -ERANGE;
			}
			cursor_addr = mfd->cursor_buf_phys;
		}
		mdss_mdp_hw_cursor_setimage(mixer_left, cursor, cursor_addr,
				&roi);
		if (is_split_lm(mfd))
			mdss_mdp_hw_cursor_setimage(mixer_right, cursor,
					cursor_addr, &roi);
	}

	/*
	 * Stage the cursor on the mixer(s) it overlaps; the mixer it does
	 * not overlap gets its cursor blend disabled (blend_config is a
	 * no-op when passed a NULL mixer on non-split panels).
	 */
	if ((start_x + roi.w) <= left_lm_w) {
		if (cursor->set & FB_CUR_SETPOS)
			mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
					start_y);
		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
		cursor->enable = false;
		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
	} else if (start_x >= left_lm_w) {
		start_x -= left_lm_w;
		if (cursor->set & FB_CUR_SETPOS)
			mdss_mdp_hw_cursor_setpos(mixer_right, &roi, start_x,
					start_y);
		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
		cursor->enable = false;
		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
	} else {
		/* cursor straddles the boundary: split the ROI */
		struct mdss_rect roi_right = roi;

		roi.w = left_lm_w - start_x;
		if (cursor->set & FB_CUR_SETPOS)
			mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
					start_y);
		mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);

		roi_right.x = 0;
		roi_right.w = (start_x + roi_right.w) - left_lm_w;
		start_x = 0;
		if (cursor->set & FB_CUR_SETPOS)
			mdss_mdp_hw_cursor_setpos(mixer_right, &roi_right,
					start_x, start_y);
		mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
	}

	/* cursor flush bits: BIT(6) shifted by mixer number */
	mixer_left->ctl->flush_bits |= BIT(6) << mixer_left->num;
	if (is_split_lm(mfd))
		mixer_right->ctl->flush_bits |= BIT(6) << mixer_right->num;
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	return 0;
}
4961
4962static int mdss_bl_scale_config(struct msm_fb_data_type *mfd,
4963 struct mdp_bl_scale_data *data)
4964{
4965 int ret = 0;
4966 int curr_bl;
4967
4968 mutex_lock(&mfd->bl_lock);
4969 curr_bl = mfd->bl_level;
4970 mfd->bl_scale = data->scale;
4971 mfd->bl_min_lvl = data->min_lvl;
4972 pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
4973 mfd->bl_min_lvl);
4974
4975 /* Update current backlight to use new scaling, if it is not zero */
4976 if (curr_bl)
4977 mdss_fb_set_backlight(mfd, curr_bl);
4978
4979 mutex_unlock(&mfd->bl_lock);
4980 return ret;
4981}
4982
/*
 * Dispatch MSMFB_MDP_PP ioctl requests: copies the msmfb_mdp_pp request
 * from userspace, routes it to the matching post-processing config
 * handler and copies results back when the handler asks for it via
 * the copyback flag.
 */
static int mdss_mdp_pp_ioctl(struct msm_fb_data_type *mfd,
				void __user *argp)
{
	int ret;
	struct msmfb_mdp_pp mdp_pp;
	u32 copyback = 0;
	u32 copy_from_kernel = 0;

	ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
	if (ret)
		return ret;

	/* Support only MDP register read/write and
	 * exit_dcm in DCM state
	 */
	if (mfd->dcm_state == DCM_ENTER &&
			(mdp_pp.op != mdp_op_calib_buffer &&
			mdp_pp.op != mdp_op_calib_dcm_state))
		return -EPERM;

	switch (mdp_pp.op) {
	case mdp_op_pa_cfg:
		ret = mdss_mdp_pa_config(mfd, &mdp_pp.data.pa_cfg_data,
					&copyback);
		break;

	case mdp_op_pa_v2_cfg:
		ret = mdss_mdp_pa_v2_config(mfd, &mdp_pp.data.pa_v2_cfg_data,
					&copyback);
		break;

	case mdp_op_pcc_cfg:
		ret = mdss_mdp_pcc_config(mfd, &mdp_pp.data.pcc_cfg_data,
					&copyback);
		break;

	case mdp_op_lut_cfg:
		/* LUT requests carry a sub-type selecting the LUT block */
		switch (mdp_pp.data.lut_cfg_data.lut_type) {
		case mdp_lut_igc:
			ret = mdss_mdp_igc_lut_config(mfd,
					(struct mdp_igc_lut_data *)
					&mdp_pp.data.lut_cfg_data.data,
					&copyback, copy_from_kernel);
			break;

		case mdp_lut_pgc:
			ret = mdss_mdp_argc_config(mfd,
				&mdp_pp.data.lut_cfg_data.data.pgc_lut_data,
				&copyback);
			break;

		case mdp_lut_hist:
			ret = mdss_mdp_hist_lut_config(mfd,
				(struct mdp_hist_lut_data *)
				&mdp_pp.data.lut_cfg_data.data, &copyback);
			break;

		default:
			ret = -ENOTSUPP;
			break;
		}
		break;
	case mdp_op_dither_cfg:
		ret = mdss_mdp_dither_config(mfd,
				&mdp_pp.data.dither_cfg_data,
				&copyback,
				false);
		break;
	case mdp_op_gamut_cfg:
		ret = mdss_mdp_gamut_config(mfd,
				&mdp_pp.data.gamut_cfg_data,
				&copyback);
		break;
	case mdp_bl_scale_cfg:
		ret = mdss_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
						&mdp_pp.data.bl_scale_data);
		break;
	case mdp_op_ad_cfg:
		ret = mdss_mdp_ad_config(mfd, &mdp_pp.data.ad_init_cfg);
		break;
	case mdp_op_ad_input:
		ret = mdss_mdp_ad_input(mfd, &mdp_pp.data.ad_input, 1);
		/* positive return means data to hand back to userspace */
		if (ret > 0) {
			ret = 0;
			copyback = 1;
		}
		break;
	case mdp_op_calib_cfg:
		ret = mdss_mdp_calib_config((struct mdp_calib_config_data *)
					 &mdp_pp.data.calib_cfg, &copyback);
		break;
	case mdp_op_calib_mode:
		ret = mdss_mdp_calib_mode(mfd, &mdp_pp.data.mdss_calib_cfg);
		break;
	case mdp_op_calib_buffer:
		ret = mdss_mdp_calib_config_buffer(
				(struct mdp_calib_config_buffer *)
				 &mdp_pp.data.calib_buffer, &copyback);
		break;
	case mdp_op_calib_dcm_state:
		ret = mdss_fb_dcm(mfd, mdp_pp.data.calib_dcm.dcm_state);
		break;
	default:
		pr_err("Unsupported request to MDP_PP IOCTL. %d = op\n",
				mdp_pp.op);
		ret = -EINVAL;
		break;
	}
	if ((ret == 0) && copyback)
		ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp));
	return ret;
}
5095
/*
 * Dispatch histogram ioctls (MSMFB_HISTOGRAM_START/STOP and collect).
 * Returns -ENOTSUPP for unrecognized commands, -EPERM when mdata is
 * missing or the fb is powered off, otherwise the handler's result.
 */
static int mdss_mdp_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
				void __user *argp)
{
	int ret = -ENOTSUPP;
	struct mdp_histogram_data hist;
	struct mdp_histogram_start_req hist_req;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 block;

	if (!mdata)
		return -EPERM;

	switch (cmd) {
	case MSMFB_HISTOGRAM_START:
		/* collection requires the panel to be powered */
		if (mdss_fb_is_power_off(mfd))
			return -EPERM;

		ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
		if (ret)
			return ret;

		ret = mdss_mdp_hist_start(&hist_req);
		break;

	case MSMFB_HISTOGRAM_STOP:
		ret = copy_from_user(&block, argp, sizeof(int));
		if (ret)
			return ret;

		ret = mdss_mdp_hist_stop(block);
		if (ret)
			return ret;
		break;

	case MSMFB_HISTOGRAM:
		if (mdss_fb_is_power_off(mfd)) {
			pr_err("mfd is turned off MSMFB_HISTOGRAM failed\n");
			return -EPERM;
		}

		ret = copy_from_user(&hist, argp, sizeof(hist));
		if (ret)
			return ret;

		ret = mdss_mdp_hist_collect(&hist);
		if (!ret)
			ret = copy_to_user(argp, &hist, sizeof(hist));
		break;
	default:
		break;
	}
	return ret;
}
5149
5150static int mdss_fb_set_metadata(struct msm_fb_data_type *mfd,
5151 struct msmfb_metadata *metadata)
5152{
5153 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
5154 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
5155 int ret = 0;
5156
5157 if (!ctl)
5158 return -EPERM;
5159 switch (metadata->op) {
5160 case metadata_op_vic:
5161 if (mfd->panel_info)
5162 mfd->panel_info->vic =
5163 metadata->data.video_info_code;
5164 else
5165 ret = -EINVAL;
5166 break;
5167 case metadata_op_crc:
5168 if (mdss_fb_is_power_off(mfd))
5169 return -EPERM;
5170 ret = mdss_misr_set(mdata, &metadata->data.misr_request, ctl);
5171 break;
5172 default:
5173 pr_warn("unsupported request to MDP META IOCTL\n");
5174 ret = -EINVAL;
5175 break;
5176 }
5177 return ret;
5178}
5179
5180static int mdss_fb_get_hw_caps(struct msm_fb_data_type *mfd,
5181 struct mdss_hw_caps *caps)
5182{
5183 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
5184
5185 caps->mdp_rev = mdata->mdp_rev;
5186 caps->vig_pipes = mdata->nvig_pipes;
5187 caps->rgb_pipes = mdata->nrgb_pipes;
5188 caps->dma_pipes = mdata->ndma_pipes;
5189 if (mdata->has_bwc)
5190 caps->features |= MDP_BWC_EN;
5191 if (mdata->has_decimation)
5192 caps->features |= MDP_DECIMATION_EN;
5193
5194 if (mdata->smp_mb_cnt) {
5195 caps->max_smp_cnt = mdata->smp_mb_cnt;
5196 caps->smp_per_pipe = mdata->smp_mb_per_pipe;
5197 }
5198
5199 return 0;
5200}
5201
/*
 * mdss_fb_get_metadata() - service MSMFB_METADATA_GET queries
 * @mfd: framebuffer device being queried
 * @metadata: in/out request; ->op selects the query and the matching
 *	member of ->data is filled in on success
 *
 * Supported ops: current panel frame rate, MDP hardware capabilities,
 * an fd referencing the framebuffer ION memory, and MISR/CRC readback.
 *
 * Return: 0 on success, -EPERM if CRC is requested without an active
 * powered ctl, -EINVAL for unknown ops.
 */
static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd,
				struct msmfb_metadata *metadata)
{
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = NULL;
	int ret = 0;

	switch (metadata->op) {
	case metadata_op_frame_rate:
		metadata->data.panel_frame_rate =
			mdss_panel_get_framerate(mfd->panel_info,
				FPS_RESOLUTION_DEFAULT);
		pr_debug("current fps:%d\n", metadata->data.panel_frame_rate);
		break;
	case metadata_op_get_caps:
		ret = mdss_fb_get_hw_caps(mfd, &metadata->data.caps);
		break;
	case metadata_op_get_ion_fd:
		if (mfd->fb_ion_handle && mfd->fb_ion_client) {
			/*
			 * Take a dma-buf reference for the fd being handed
			 * out; it is dropped again below if fd allocation
			 * fails, otherwise the fd owns it.
			 */
			get_dma_buf(mfd->fbmem_buf);
			metadata->data.fbmem_ionfd =
				ion_share_dma_buf_fd(mfd->fb_ion_client,
					mfd->fb_ion_handle);
			if (metadata->data.fbmem_ionfd < 0) {
				dma_buf_put(mfd->fbmem_buf);
				pr_err("fd allocation failed. fd = %d\n",
						metadata->data.fbmem_ionfd);
			}
		}
		break;
	case metadata_op_crc:
		ctl = mfd_to_ctl(mfd);
		/* MISR readback requires an active, powered display */
		if (!ctl || mdss_fb_is_power_off(mfd))
			return -EPERM;
		ret = mdss_misr_get(mdata, &metadata->data.misr_request, ctl,
			ctl->is_video_mode);
		break;
	default:
		pr_warn("Unsupported request to MDP META IOCTL.\n");
		ret = -EINVAL;
		break;
	}
	return ret;
}
5246
5247static int __mdss_mdp_clean_dirty_pipes(struct msm_fb_data_type *mfd)
5248{
5249 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
5250 struct mdss_mdp_pipe *pipe;
5251 int unset_ndx = 0;
5252
5253 mutex_lock(&mdp5_data->list_lock);
5254 list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
5255 if (pipe->dirty)
5256 unset_ndx |= pipe->ndx;
5257 }
5258 mutex_unlock(&mdp5_data->list_lock);
5259 if (unset_ndx)
5260 mdss_mdp_overlay_release(mfd, unset_ndx);
5261
5262 return unset_ndx;
5263}
5264
/*
 * mdss_mdp_overlay_precommit() - validate overlay state before a commit
 * @mfd: framebuffer device about to be committed
 *
 * Releases any pipes userspace left dirty (returning -EPIPE so the
 * caller knows tracking was lost) and rejects commits that arrive with
 * pipes attached while a display mode switch is mid-flight.
 *
 * Return: 0 when the commit may proceed, -ENODEV/-EPIPE/-EINVAL on
 * failure, or the interrupted-lock error from ov_lock.
 */
static int mdss_mdp_overlay_precommit(struct msm_fb_data_type *mfd)
{
	struct mdss_overlay_private *mdp5_data;
	int ret;

	if (!mfd)
		return -ENODEV;

	mdp5_data = mfd_to_mdp5_data(mfd);
	if (!mdp5_data)
		return -ENODEV;

	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
	if (ret)
		return ret;

	/*
	 * we can assume that any pipes that are still dirty at this point are
	 * not properly tracked by user land. This could be for any reason,
	 * mark them for cleanup at this point.
	 */
	ret = __mdss_mdp_clean_dirty_pipes(mfd);
	if (ret) {
		pr_warn("fb%d: dirty pipes remaining %x\n",
				mfd->index, ret);
		ret = -EPIPE;
	}

	/*
	 * If we are in process of mode switch we may have an invalid state.
	 * We can allow commit to happen if there are no pipes attached as only
	 * border color will be seen regardless of resolution or mode.
	 */
	if ((mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) &&
			(mfd->switch_state != MDSS_MDP_WAIT_FOR_COMMIT)) {
		if (list_empty(&mdp5_data->pipes_used)) {
			mfd->switch_state = MDSS_MDP_WAIT_FOR_COMMIT;
		} else {
			pr_warn("Invalid commit on fb%d with state=%d\n",
					mfd->index, mfd->switch_state);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&mdp5_data->ov_lock);

	return ret;
}
5312
5313/*
5314 * This routine serves two purposes.
5315 * 1. Propagate overlay_id returned from sorted list to original list
5316 * to user-space.
5317 * 2. In case of error processing sorted list, map the error overlay's
5318 * index to original list because user-space is not aware of the sorted list.
5319 */
5320static int __mdss_overlay_map(struct mdp_overlay *ovs,
5321 struct mdp_overlay *op_ovs, int num_ovs, int num_ovs_processed)
5322{
5323 int mapped = num_ovs_processed;
5324 int j, k;
5325
5326 for (j = 0; j < num_ovs; j++) {
5327 for (k = 0; k < num_ovs; k++) {
5328 if ((ovs[j].dst_rect.x == op_ovs[k].dst_rect.x) &&
5329 (ovs[j].z_order == op_ovs[k].z_order)) {
5330 op_ovs[k].id = ovs[j].id;
5331 op_ovs[k].priority = ovs[j].priority;
5332 break;
5333 }
5334 }
5335
5336 if ((mapped != num_ovs) && (mapped == j)) {
5337 pr_debug("mapped %d->%d\n", mapped, k);
5338 mapped = k;
5339 }
5340 }
5341
5342 return mapped;
5343}
5344
5345static inline void __overlay_swap_func(void *a, void *b, int size)
5346{
5347 swap(*(struct mdp_overlay *)a, *(struct mdp_overlay *)b);
5348}
5349
5350static inline int __zorder_dstx_cmp_func(const void *a, const void *b)
5351{
5352 int rc = 0;
5353 const struct mdp_overlay *ov1 = a;
5354 const struct mdp_overlay *ov2 = b;
5355
5356 if (ov1->z_order < ov2->z_order)
5357 rc = -1;
5358 else if ((ov1->z_order == ov2->z_order) &&
5359 (ov1->dst_rect.x < ov2->dst_rect.x))
5360 rc = -1;
5361
5362 return rc;
5363}
5364
5365/*
5366 * first sort list of overlays based on z_order and then within
5367 * same z_order sort them on dst_x.
5368 */
5369static int __mdss_overlay_src_split_sort(struct msm_fb_data_type *mfd,
5370 struct mdp_overlay *ovs, int num_ovs)
5371{
5372 int i;
5373 int left_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
5374 int right_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
5375 u32 left_lm_w = left_lm_w_from_mfd(mfd);
5376
5377 sort(ovs, num_ovs, sizeof(struct mdp_overlay), __zorder_dstx_cmp_func,
5378 __overlay_swap_func);
5379
5380 for (i = 0; i < num_ovs; i++) {
5381 if (ovs[i].z_order >= MDSS_MDP_MAX_STAGE) {
5382 pr_err("invalid stage:%u\n", ovs[i].z_order);
5383 return -EINVAL;
5384 }
5385 if (ovs[i].dst_rect.x < left_lm_w) {
5386 if (left_lm_zo_cnt[ovs[i].z_order] == 2) {
5387 pr_err("more than 2 ov @ stage%u on left lm\n",
5388 ovs[i].z_order);
5389 return -EINVAL;
5390 }
5391 left_lm_zo_cnt[ovs[i].z_order]++;
5392 } else {
5393 if (right_lm_zo_cnt[ovs[i].z_order] == 2) {
5394 pr_err("more than 2 ov @ stage%u on right lm\n",
5395 ovs[i].z_order);
5396 return -EINVAL;
5397 }
5398 right_lm_zo_cnt[ovs[i].z_order]++;
5399 }
5400 }
5401
5402 return 0;
5403}
5404
5405static int __handle_overlay_prepare(struct msm_fb_data_type *mfd,
5406 struct mdp_overlay_list *ovlist, struct mdp_overlay *ip_ovs)
5407{
5408 int ret, i;
5409 int new_reqs = 0, left_cnt = 0, right_cnt = 0;
5410 int num_ovs = ovlist->num_overlays;
5411 u32 left_lm_w = left_lm_w_from_mfd(mfd);
5412 u32 left_lm_ovs = 0, right_lm_ovs = 0;
5413 bool is_single_layer = false;
5414
5415 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
5416 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
5417
5418 struct mdp_overlay *sorted_ovs = NULL;
5419 struct mdp_overlay *req, *prev_req;
5420
5421 struct mdss_mdp_pipe *pipe, *left_blend_pipe;
5422 struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = { 0 };
5423 struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = { 0 };
5424
5425 bool sort_needed = mdata->has_src_split && (num_ovs > 1);
5426
5427 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
5428 if (ret)
5429 return ret;
5430
5431 if (mdss_fb_is_power_off(mfd)) {
5432 mutex_unlock(&mdp5_data->ov_lock);
5433 return -EPERM;
5434 }
5435
5436 if (sort_needed) {
5437 sorted_ovs = kcalloc(num_ovs, sizeof(*ip_ovs), GFP_KERNEL);
5438 if (!sorted_ovs) {
5439 pr_err("error allocating ovlist mem\n");
5440 return -ENOMEM;
5441 }
5442 memcpy(sorted_ovs, ip_ovs, num_ovs * sizeof(*ip_ovs));
5443 ret = __mdss_overlay_src_split_sort(mfd, sorted_ovs, num_ovs);
5444 if (ret) {
5445 pr_err("src_split_sort failed. ret=%d\n", ret);
5446 kfree(sorted_ovs);
5447 return ret;
5448 }
5449 }
5450
5451 pr_debug("prepare fb%d num_ovs=%d\n", mfd->index, num_ovs);
5452
5453 for (i = 0; i < num_ovs; i++) {
5454 if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
5455 left_lm_w))
5456 right_lm_ovs++;
5457 else
5458 left_lm_ovs++;
5459
5460 if ((left_lm_ovs > 1) && (right_lm_ovs > 1))
5461 break;
5462 }
5463
5464 for (i = 0; i < num_ovs; i++) {
5465 left_blend_pipe = NULL;
5466
5467 if (sort_needed) {
5468 req = &sorted_ovs[i];
5469 prev_req = (i > 0) ? &sorted_ovs[i - 1] : NULL;
5470
5471 /*
5472 * check if current overlay is at same z_order as
5473 * previous one and qualifies as a right blend. If yes,
5474 * pass a pointer to the pipe representing previous
5475 * overlay or in other terms left blend overlay.
5476 */
5477 if (prev_req && (prev_req->z_order == req->z_order) &&
5478 is_ov_right_blend(&prev_req->dst_rect,
5479 &req->dst_rect, left_lm_w)) {
5480 left_blend_pipe = pipe;
5481 }
5482 } else {
5483 req = &ip_ovs[i];
5484 }
5485
5486 if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
5487 left_lm_w))
5488 is_single_layer = (right_lm_ovs == 1);
5489 else
5490 is_single_layer = (left_lm_ovs == 1);
5491
5492 req->z_order += MDSS_MDP_STAGE_0;
5493 ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe,
5494 left_blend_pipe, is_single_layer);
5495 req->z_order -= MDSS_MDP_STAGE_0;
5496
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305497 if (IS_ERR_VALUE((unsigned long)ret))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305498 goto validate_exit;
5499
5500 pr_debug("pnum:%d id:0x%x flags:0x%x dst_x:%d l_blend_pnum%d\n",
5501 pipe->num, req->id, req->flags, req->dst_rect.x,
5502 left_blend_pipe ? left_blend_pipe->num : -1);
5503
5504 /* keep track of the new overlays to unset in case of errors */
5505 if (pipe->play_cnt == 0)
5506 new_reqs |= pipe->ndx;
5507
5508 if (IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w)) {
5509 if (right_cnt >= MAX_PIPES_PER_LM) {
5510 pr_err("too many pipes on right mixer\n");
5511 ret = -EINVAL;
5512 goto validate_exit;
5513 }
5514 right_plist[right_cnt] = pipe;
5515 right_cnt++;
5516 } else {
5517 if (left_cnt >= MAX_PIPES_PER_LM) {
5518 pr_err("too many pipes on left mixer\n");
5519 ret = -EINVAL;
5520 goto validate_exit;
5521 }
5522 left_plist[left_cnt] = pipe;
5523 left_cnt++;
5524 }
5525 }
5526
5527 ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
5528 right_plist, right_cnt);
5529
5530validate_exit:
5531 if (sort_needed)
5532 ovlist->processed_overlays =
5533 __mdss_overlay_map(sorted_ovs, ip_ovs, num_ovs, i);
5534 else
5535 ovlist->processed_overlays = i;
5536
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305537 if (IS_ERR_VALUE((unsigned long)ret)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305538 pr_debug("err=%d total_ovs:%d processed:%d left:%d right:%d\n",
5539 ret, num_ovs, ovlist->processed_overlays, left_lm_ovs,
5540 right_lm_ovs);
5541 mdss_mdp_overlay_release(mfd, new_reqs);
5542 }
5543 mutex_unlock(&mdp5_data->ov_lock);
5544
5545 kfree(sorted_ovs);
5546
5547 return ret;
5548}
5549
5550static int __handle_ioctl_overlay_prepare(struct msm_fb_data_type *mfd,
5551 void __user *argp)
5552{
5553 struct mdp_overlay_list ovlist;
5554 struct mdp_overlay *req_list[OVERLAY_MAX];
5555 struct mdp_overlay *overlays;
5556 int i, ret;
5557
5558 if (!mfd_to_ctl(mfd))
5559 return -ENODEV;
5560
5561 if (copy_from_user(&ovlist, argp, sizeof(ovlist)))
5562 return -EFAULT;
5563
5564 if (ovlist.num_overlays > OVERLAY_MAX) {
5565 pr_err("Number of overlays exceeds max\n");
5566 return -EINVAL;
5567 }
5568
5569 overlays = kmalloc_array(ovlist.num_overlays, sizeof(*overlays),
5570 GFP_KERNEL);
5571 if (!overlays)
5572 return -ENOMEM;
5573
5574 if (copy_from_user(req_list, ovlist.overlay_list,
5575 sizeof(struct mdp_overlay *) *
5576 ovlist.num_overlays)) {
5577 ret = -EFAULT;
5578 goto validate_exit;
5579 }
5580
5581 for (i = 0; i < ovlist.num_overlays; i++) {
5582 if (copy_from_user(overlays + i, req_list[i],
5583 sizeof(struct mdp_overlay))) {
5584 ret = -EFAULT;
5585 goto validate_exit;
5586 }
5587 }
5588
5589 ret = __handle_overlay_prepare(mfd, &ovlist, overlays);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305590 if (!IS_ERR_VALUE((unsigned long)ret)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305591 for (i = 0; i < ovlist.num_overlays; i++) {
5592 if (copy_to_user(req_list[i], overlays + i,
5593 sizeof(struct mdp_overlay))) {
5594 ret = -EFAULT;
5595 goto validate_exit;
5596 }
5597 }
5598 }
5599
5600 if (copy_to_user(argp, &ovlist, sizeof(ovlist)))
5601 ret = -EFAULT;
5602
5603validate_exit:
5604 kfree(overlays);
5605
5606 return ret;
5607}
5608
/*
 * mdss_mdp_overlay_ioctl_handler() - top-level dispatcher for the MDP
 * overlay ioctls issued against @mfd.
 * @cmd: MSMFB_* ioctl number
 * @argp: user pointer to the command-specific payload
 *
 * Return: 0 on success, -ENOTSUPP for unhandled commands, otherwise the
 * error of the specific handler. NOTE(review): several paths return the
 * raw (positive) copy_{from,to}_user() byte count instead of -EFAULT;
 * preserved here as callers/userspace may rely on the existing codes.
 */
static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
					  u32 cmd, void __user *argp)
{
	/* req is only allocated by OVERLAY_GET/SET; freed once at exit */
	struct mdp_overlay *req = NULL;
	int val, ret = -ENOTSUPP;
	struct msmfb_metadata metadata;
	struct mdp_pp_feature_version pp_feature_version;
	struct msmfb_overlay_data data;
	struct mdp_set_cfg cfg;

	switch (cmd) {
	case MSMFB_MDP_PP:
		ret = mdss_mdp_pp_ioctl(mfd, argp);
		break;
	case MSMFB_MDP_PP_GET_FEATURE_VERSION:
		ret = copy_from_user(&pp_feature_version, argp,
				sizeof(pp_feature_version));
		if (ret) {
			pr_err("copy_from_user failed for pp_feature_version\n");
			ret = -EFAULT;
		} else {
			ret = mdss_mdp_pp_get_version(&pp_feature_version);
			if (!ret) {
				ret = copy_to_user(argp, &pp_feature_version,
						sizeof(pp_feature_version));
				if (ret) {
					pr_err("copy_to_user failed for pp_feature_version\n");
					ret = -EFAULT;
				}
			} else {
				pr_err("get pp version failed ret %d\n", ret);
			}
		}
		break;
	/* all three histogram commands share one sub-dispatcher */
	case MSMFB_HISTOGRAM_START:
	case MSMFB_HISTOGRAM_STOP:
	case MSMFB_HISTOGRAM:
		ret = mdss_mdp_histo_ioctl(mfd, cmd, argp);
		break;

	case MSMFB_OVERLAY_GET:
		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
		if (!req)
			return -ENOMEM;
		ret = copy_from_user(req, argp, sizeof(*req));
		if (!ret) {
			ret = mdss_mdp_overlay_get(mfd, req);

			if (!IS_ERR_VALUE((unsigned long)ret))
				ret = copy_to_user(argp, req, sizeof(*req));
		}

		if (ret)
			pr_debug("OVERLAY_GET failed (%d)\n", ret);
		break;

	case MSMFB_OVERLAY_SET:
		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
		if (!req)
			return -ENOMEM;
		ret = copy_from_user(req, argp, sizeof(*req));
		if (!ret) {
			ret = mdss_mdp_overlay_set(mfd, req);

			/* write back the assigned overlay id */
			if (!IS_ERR_VALUE((unsigned long)ret))
				ret = copy_to_user(argp, req, sizeof(*req));
		}
		if (ret)
			pr_debug("OVERLAY_SET failed (%d)\n", ret);
		break;

	case MSMFB_OVERLAY_UNSET:
		if (!IS_ERR_VALUE(copy_from_user(&val, argp, sizeof(val))))
			ret = mdss_mdp_overlay_unset(mfd, val);
		break;

	case MSMFB_OVERLAY_PLAY:
		ret = copy_from_user(&data, argp, sizeof(data));
		if (!ret)
			ret = mdss_mdp_overlay_play(mfd, &data);

		if (ret)
			pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
		break;

	case MSMFB_OVERLAY_VSYNC_CTRL:
		if (!copy_from_user(&val, argp, sizeof(val))) {
			ret = mdss_mdp_overlay_vsync_ctrl(mfd, val);
		} else {
			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed (%d)\n", ret);
			ret = -EFAULT;
		}
		break;

	case MSMFB_METADATA_SET:
		ret = copy_from_user(&metadata, argp, sizeof(metadata));
		if (ret)
			return ret;
		ret = mdss_fb_set_metadata(mfd, &metadata);
		break;

	case MSMFB_METADATA_GET:
		ret = copy_from_user(&metadata, argp, sizeof(metadata));
		if (ret)
			return ret;
		ret = mdss_fb_get_metadata(mfd, &metadata);
		if (!ret)
			ret = copy_to_user(argp, &metadata, sizeof(metadata));
		break;

	case MSMFB_OVERLAY_PREPARE:
		ret = __handle_ioctl_overlay_prepare(mfd, argp);
		break;
	case MSMFB_MDP_SET_CFG:
		ret = copy_from_user(&cfg, argp, sizeof(cfg));
		if (ret) {
			pr_err("copy failed MSMFB_MDP_SET_CFG ret %d\n", ret);
			ret = -EFAULT;
			break;
		}
		ret = mdss_mdp_set_cfg(mfd, &cfg);
		break;

	default:
		break;
	}

	/* kfree(NULL) is a no-op, so this covers the non-GET/SET cases too */
	kfree(req);
	return ret;
}
5739
5740/**
5741 * __mdss_mdp_overlay_ctl_init - Helper function to initialize control structure
5742 * @mfd: msm frame buffer data structure associated with the fb device.
5743 *
5744 * Helper function that allocates and initializes the mdp control structure
5745 * for a frame buffer device. Whenever applicable, this function will also setup
5746 * the control for the split display path as well.
5747 *
5748 * Return: pointer to the newly allocated control structure.
5749 */
5750static struct mdss_mdp_ctl *__mdss_mdp_overlay_ctl_init(
5751 struct msm_fb_data_type *mfd)
5752{
5753 int rc = 0;
5754 struct mdss_mdp_ctl *ctl;
5755 struct mdss_panel_data *pdata;
5756 struct mdss_overlay_private *mdp5_data;
5757
5758 if (!mfd)
5759 return ERR_PTR(-EINVAL);
5760
5761 pdata = dev_get_platdata(&mfd->pdev->dev);
5762 if (!pdata) {
5763 pr_err("no panel connected for fb%d\n", mfd->index);
5764 rc = -ENODEV;
5765 goto error;
5766 }
5767
5768 mdp5_data = mfd_to_mdp5_data(mfd);
5769 if (!mdp5_data) {
5770 rc = -EINVAL;
5771 goto error;
5772 }
5773
5774 ctl = mdss_mdp_ctl_init(pdata, mfd);
5775 if (IS_ERR_OR_NULL(ctl)) {
5776 pr_err("Unable to initialize ctl for fb%d\n",
5777 mfd->index);
5778 rc = PTR_ERR(ctl);
5779 goto error;
5780 }
5781 ctl->is_master = true;
5782 ctl->vsync_handler.vsync_handler =
5783 mdss_mdp_overlay_handle_vsync;
5784 ctl->vsync_handler.cmd_post_flush = false;
5785
5786 ctl->recover_underrun_handler.vsync_handler =
5787 mdss_mdp_recover_underrun_handler;
5788 ctl->recover_underrun_handler.cmd_post_flush = false;
5789
5790 ctl->frc_vsync_handler.vsync_handler =
5791 mdss_mdp_overlay_frc_handler;
5792 ctl->frc_vsync_handler.cmd_post_flush = false;
5793
5794 ctl->lineptr_handler.lineptr_handler =
5795 mdss_mdp_overlay_handle_lineptr;
5796
5797 INIT_WORK(&ctl->remove_underrun_handler,
5798 remove_underrun_vsync_handler);
5799
5800 if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
5801 /* enable split display */
5802 rc = mdss_mdp_ctl_split_display_setup(ctl, pdata->next);
5803 if (rc) {
5804 mdss_mdp_ctl_destroy(ctl);
5805 goto error;
5806 }
5807 }
5808
5809 mdp5_data->ctl = ctl;
5810error:
5811 if (rc)
5812 return ERR_PTR(rc);
5813 else
5814 return ctl;
5815}
5816
5817static void mdss_mdp_set_lm_flag(struct msm_fb_data_type *mfd)
5818{
5819 u32 width;
5820 struct mdss_data_type *mdata;
5821
5822 /* if lm_widths are set, the split_mode would have been set */
5823 if (mfd->panel_info->lm_widths[0] && mfd->panel_info->lm_widths[1])
5824 return;
5825
5826 mdata = mdss_mdp_get_mdata();
5827 width = mfd->fbi->var.xres;
5828
5829 /* setting the appropriate split_mode for HDMI usecases */
5830 if ((mfd->split_mode == MDP_SPLIT_MODE_NONE ||
5831 mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
5832 (width > mdata->max_mixer_width)) {
5833 width /= 2;
5834 mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
5835 mfd->split_fb_left = width;
5836 mfd->split_fb_right = width;
5837 } else if (is_dual_lm_single_display(mfd) &&
5838 (width <= mdata->max_mixer_width)) {
5839 mfd->split_mode = MDP_SPLIT_MODE_NONE;
5840 mfd->split_fb_left = 0;
5841 mfd->split_fb_right = 0;
5842 }
5843}
5844
/*
 * mdss_mdp_handle_invalid_switch_state() - recover from a blank that
 * arrived while a display mode switch was still in progress.
 * @mfd: framebuffer device being blanked
 *
 * Clears the pending switch state and, for command-mode panels only,
 * waits for any in-flight pingpong transfer on both ctl paths before
 * moving all in-use buffers to the free list so they can be reclaimed.
 */
static void mdss_mdp_handle_invalid_switch_state(struct msm_fb_data_type *mfd)
{
	int rc = 0;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
	struct mdss_mdp_data *buf, *tmpbuf;

	mfd->switch_state = MDSS_MDP_NO_UPDATE_REQUESTED;

	/*
	 * Handle only for cmd mode panels as for video mode, buffers
	 * cannot be freed at this point. Needs revisting to handle the
	 * use case for video mode panels.
	 */
	if (mfd->panel_info->type == MIPI_CMD_PANEL) {
		if (ctl->ops.wait_pingpong)
			rc = ctl->ops.wait_pingpong(ctl, NULL);
		if (!rc && sctl && sctl->ops.wait_pingpong)
			rc = sctl->ops.wait_pingpong(sctl, NULL);
		if (rc) {
			/* buffers stay on the used list if hw never idled */
			pr_err("wait for pp failed\n");
			return;
		}

		mutex_lock(&mdp5_data->list_lock);
		list_for_each_entry_safe(buf, tmpbuf,
				&mdp5_data->bufs_used, buf_list)
			list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
		mutex_unlock(&mdp5_data->list_lock);
	}
}
5877
/*
 * mdss_mdp_overlay_on() - power-on / unblank path for the overlay layer
 * @mfd: framebuffer device being turned on
 *
 * Lazily creates the ctl (and, for writeback panels, the WFD context),
 * resets the interface, and either starts the first frame or just sets
 * up the ctl when continuous splash or a pending handoff forbids a
 * kickoff. On any failure the overlay path is turned off again.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
{
	int rc;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_mdp_ctl *ctl = NULL;
	struct mdss_data_type *mdata;

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);
	if (!mdp5_data)
		return -EINVAL;

	mdata = mfd_to_mdata(mfd);
	if (!mdata)
		return -EINVAL;

	/* re-evaluate single vs dual LM before (re)building the ctl */
	mdss_mdp_set_lm_flag(mfd);

	if (!mdp5_data->ctl) {
		ctl = __mdss_mdp_overlay_ctl_init(mfd);
		if (IS_ERR_OR_NULL(ctl))
			return PTR_ERR(ctl);
	} else {
		ctl = mdp5_data->ctl;
	}

	if (mfd->panel_info->type == WRITEBACK_PANEL && !mdp5_data->wfd) {
		mdp5_data->wfd = mdss_mdp_wfd_init(&mfd->pdev->dev, ctl);
		if (IS_ERR_OR_NULL(mdp5_data->wfd)) {
			rc = PTR_ERR(mdp5_data->wfd);
			/* error value flows into the check at panel_on */
			goto panel_on;
		}
	}

	if (mdss_fb_is_power_on(mfd)) {
		pr_debug("panel was never turned off\n");
		rc = mdss_mdp_ctl_start(ctl, false);
		goto panel_on;
	}

	rc = mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_RESET,
		NULL, false);
	if (rc)
		goto panel_on;

	/* Skip the overlay start and kickoff for all displays
	 * if handoff is pending. Previously we skipped it for DTV
	 * panel and pluggable panels (bridge chip hdmi case). But
	 * it does not cover the case where there is a non pluggable
	 * tertiary display. Using the flag handoff_pending to skip
	 * overlay start and kickoff should cover all cases
	 * TODO: In the long run, the overlay start and kickoff
	 * should not be skipped, instead, the handoff can be done
	 */
	if (!mfd->panel_info->cont_splash_enabled &&
		!mdata->handoff_pending) {
		rc = mdss_mdp_overlay_start(mfd);
		if (rc)
			goto end;
		if (mfd->panel_info->type != WRITEBACK_PANEL) {
			/* push one frame so the panel has valid content */
			atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
			rc = mdss_mdp_overlay_kickoff(mfd, NULL);
		}
	} else {
		rc = mdss_mdp_ctl_setup(ctl);
		if (rc)
			goto end;
	}

panel_on:
	if (IS_ERR_VALUE((unsigned long)rc)) {
		pr_err("Failed to turn on fb%d\n", mfd->index);
		mdss_mdp_overlay_off(mfd);
		goto end;
	}

end:
	return rc;
}
5962
5963static int mdss_mdp_handoff_cleanup_ctl(struct msm_fb_data_type *mfd)
5964{
5965 int rc;
5966 int need_cleanup;
5967 struct mdss_overlay_private *mdp5_data;
5968
5969 if (!mfd)
5970 return -ENODEV;
5971
5972 if (mfd->key != MFD_KEY)
5973 return -EINVAL;
5974
5975 mdp5_data = mfd_to_mdp5_data(mfd);
5976
5977 mdss_mdp_overlay_free_fb_pipe(mfd);
5978
5979 mutex_lock(&mdp5_data->list_lock);
5980 need_cleanup = !list_empty(&mdp5_data->pipes_cleanup) ||
5981 !list_empty(&mdp5_data->pipes_used);
5982 mutex_unlock(&mdp5_data->list_lock);
5983
5984 if (need_cleanup)
5985 mdss_mdp_overlay_kickoff(mfd, NULL);
5986
5987 rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
5988 if (!rc) {
5989 if (mdss_fb_is_power_off(mfd)) {
5990 mutex_lock(&mdp5_data->list_lock);
5991 __mdss_mdp_overlay_free_list_purge(mfd);
5992 mutex_unlock(&mdp5_data->list_lock);
5993 }
5994 }
5995
5996 rc = mdss_mdp_splash_cleanup(mfd, false);
5997 if (rc)
5998 pr_err("%s: failed splash clean up %d\n", __func__, rc);
5999
6000 return rc;
6001}
6002
/*
 * mdss_mdp_overlay_off() - power-off / blank path for the overlay layer
 * @mfd: framebuffer device being turned off
 *
 * Frees the fb pipe and cursor state, flushes pending pipe cleanup with
 * a final kickoff, drains outstanding retire fences, stops (and when
 * appropriate destroys) the ctl, and balances the runtime-PM reference
 * taken at the top of the function.
 *
 * Return: 0 on success or the error from ctl stop / runtime PM.
 */
static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
{
	int rc;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_mixer *mixer;
	int need_cleanup;
	int retire_cnt;
	bool destroy_ctl = false;

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl) {
		pr_err("ctl not initialized\n");
		return -ENODEV;
	}

	/*
	 * Keep a reference to the runtime pm until the overlay is turned
	 * off, and then release this last reference at the end. This will
	 * help in distinguishing between idle power collapse versus suspend
	 * power collapse
	 */
	pm_runtime_get_sync(&mfd->pdev->dev);

	if (mdss_fb_is_power_on_lp(mfd)) {
		/* low-power doze: keep the overlay alive, just stop the ctl */
		pr_debug("panel not turned off. keeping overlay on\n");
		goto ctl_stop;
	}

	mutex_lock(&mdp5_data->ov_lock);

	mdss_mdp_overlay_free_fb_pipe(mfd);

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
	if (mixer)
		mixer->cursor_enabled = 0;

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_RIGHT);
	if (mixer)
		mixer->cursor_enabled = 0;

	mutex_lock(&mdp5_data->list_lock);
	need_cleanup = !list_empty(&mdp5_data->pipes_cleanup);
	mutex_unlock(&mdp5_data->list_lock);
	mutex_unlock(&mdp5_data->ov_lock);

	destroy_ctl = !mfd->ref_cnt || mfd->panel_reconfig;

	mutex_lock(&mfd->switch_lock);
	if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
		/* blank during a mode switch: force teardown + recovery */
		destroy_ctl = true;
		need_cleanup = false;
		pr_warn("fb%d blank while mode switch (%d) in progress\n",
				mfd->index, mfd->switch_state);
		mdss_mdp_handle_invalid_switch_state(mfd);
	}
	mutex_unlock(&mfd->switch_lock);

	if (need_cleanup) {
		pr_debug("cleaning up pipes on fb%d\n", mfd->index);
		if (mdata->handoff_pending)
			mdp5_data->allow_kickoff = true;

		/* final kickoff flushes the pipes_cleanup list */
		mdss_mdp_overlay_kickoff(mfd, NULL);
	} else if (!mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
		if (mfd->panel_reconfig) {
			if (mfd->panel_info->cont_splash_enabled)
				mdss_mdp_handoff_cleanup_ctl(mfd);

			mdp5_data->borderfill_enable = false;
			mdss_mdp_ctl_destroy(mdp5_data->ctl);
			mdp5_data->ctl = NULL;
		}
		goto end;
	}

	/*
	 * If retire fences are still active wait for a vsync time
	 * for retire fence to be updated.
	 * As a last resort signal the timeline if vsync doesn't arrive.
	 */
	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
	retire_cnt = mdp5_data->retire_cnt;
	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
	if (retire_cnt) {
		u32 fps = mdss_panel_get_framerate(mfd->panel_info,
				FPS_RESOLUTION_HZ);
		u32 vsync_time = 1000 / (fps ? : DEFAULT_FRAME_RATE);

		msleep(vsync_time);

		mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
		retire_cnt = mdp5_data->retire_cnt;
		mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
		__vsync_retire_signal(mfd, retire_cnt);

		/*
		 * the retire work can still schedule after above retire_signal
		 * api call. Flush workqueue guarantees that current caller
		 * context is blocked till retire_work finishes. Any work
		 * schedule after flush call should not cause any issue because
		 * retire_signal api checks for retire_cnt with sync_mutex lock.
		 */

		kthread_flush_work(&mdp5_data->vsync_work);
	}

ctl_stop:
	mutex_lock(&mdp5_data->ov_lock);
	/* set the correct pipe_mapped before ctl_stop */
	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
			MDSS_MDP_MIXER_MUX_LEFT);
	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
			MDSS_MDP_MIXER_MUX_RIGHT);
	rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
	if (rc == 0) {
		if (mdss_fb_is_power_off(mfd)) {
			mutex_lock(&mdp5_data->list_lock);
			__mdss_mdp_overlay_free_list_purge(mfd);
			if (!mfd->ref_cnt)
				mdss_mdp_overlay_buf_deinit(mfd);
			mutex_unlock(&mdp5_data->list_lock);
			mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
					&mfd->mdp_sync_pt_data.notifier);

			if (destroy_ctl) {
				mdp5_data->borderfill_enable = false;
				mdss_mdp_ctl_destroy(mdp5_data->ctl);
				mdp5_data->ctl = NULL;
			}

			atomic_dec(&mdp5_data->mdata->active_intf_cnt);

			/*
			 * drop the extra runtime reference now, unless idle
			 * power collapse keeps cmd-mode panels half-alive
			 */
			if (!mdp5_data->mdata->idle_pc_enabled ||
				(mfd->panel_info->type != MIPI_CMD_PANEL)) {
				rc = pm_runtime_put(&mfd->pdev->dev);
				if (rc)
					pr_err("unable to suspend w/pm_runtime_put (%d)\n",
						rc);
			}
		}
	}
	mutex_unlock(&mdp5_data->ov_lock);

	if (mdp5_data->wfd) {
		mdss_mdp_wfd_deinit(mdp5_data->wfd);
		mdp5_data->wfd = NULL;
	}

end:
	/* Release the last reference to the runtime device */
	rc = pm_runtime_put(&mfd->pdev->dev);
	if (rc)
		pr_err("unable to suspend w/pm_runtime_put (%d)\n", rc);

	return rc;
}
6167
/*
 * __mdss_mdp_ctl_handoff() - adopt pipes the bootloader left staged
 * @mfd: framebuffer device taking ownership
 * @ctl: control path whose LAYER registers are decoded
 * @mdata: global MDP data (mixer/pipe tables)
 *
 * Reads each interface mixer's CTL_LAYER register, and for every SSPP
 * found staged there hands the pipe over to the driver, attaches it to
 * @mfd's used list and registers it with the mixer.
 *
 * Return: 0 on success or the first pipe/mixer handoff error.
 */
static int __mdss_mdp_ctl_handoff(struct msm_fb_data_type *mfd,
	struct mdss_mdp_ctl *ctl, struct mdss_data_type *mdata)
{
	int rc = 0;
	int i, j;
	u32 mixercfg;
	struct mdss_mdp_pipe *pipe = NULL;
	struct mdss_overlay_private *mdp5_data;

	if (!ctl || !mdata)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);

	for (i = 0; i < mdata->nmixers_intf; i++) {
		mixercfg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
		pr_debug("for lm%d mixercfg = 0x%09x\n", i, mixercfg);

		/* each pipe occupies a 3-bit stage field in mixercfg;
		 * stop early once no staged bits remain
		 */
		j = MDSS_MDP_SSPP_VIG0;
		for (; j < MDSS_MDP_SSPP_CURSOR0 && mixercfg; j++) {
			u32 cfg = j * 3;

			if ((j == MDSS_MDP_SSPP_VIG3) ||
					(j == MDSS_MDP_SSPP_RGB3)) {
				/* Add 2 to account for Cursor & Border bits */
				cfg += 2;
			}
			if (mixercfg & (0x7 << cfg)) {
				pr_debug("Pipe %d staged\n", j);
				/* bootloader display always uses RECT0 */
				pipe = mdss_mdp_pipe_search(mdata, BIT(j),
						MDSS_MDP_PIPE_RECT0);
				if (!pipe) {
					pr_warn("Invalid pipe %d staged\n", j);
					continue;
				}

				rc = mdss_mdp_pipe_handoff(pipe);
				if (rc) {
					pr_err("Failed to handoff pipe%d\n",
							pipe->num);
					goto exit;
				}

				pipe->mfd = mfd;
				mutex_lock(&mdp5_data->list_lock);
				list_add(&pipe->list, &mdp5_data->pipes_used);
				mutex_unlock(&mdp5_data->list_lock);

				rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
				if (rc) {
					pr_err("failed to handoff mix%d\n", i);
					goto exit;
				}
			}
		}
	}
exit:
	return rc;
}
6228
6229/**
6230 * mdss_mdp_overlay_handoff() - Read MDP registers to handoff an active ctl path
6231 * @mfd: Msm frame buffer structure associated with the fb device.
6232 *
6233 * This function populates the MDP software structures with the current state of
6234 * the MDP hardware to handoff any active control path for the framebuffer
6235 * device. This is needed to identify any ctl, mixers and pipes being set up by
6236 * the bootloader to display the splash screen when the continuous splash screen
6237 * feature is enabled in kernel.
6238 */
static int mdss_mdp_overlay_handoff(struct msm_fb_data_type *mfd)
{
	int rc = 0;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = NULL;
	struct mdss_mdp_ctl *sctl = NULL;

	if (!mdp5_data->ctl) {
		ctl = __mdss_mdp_overlay_ctl_init(mfd);
		if (IS_ERR_OR_NULL(ctl)) {
			rc = PTR_ERR(ctl);
			goto error;
		}
	} else {
		ctl = mdp5_data->ctl;
	}

	/*
	 * vsync interrupt needs on during continuous splash, this is
	 * to initialize necessary ctl members here.
	 */
	rc = mdss_mdp_ctl_start(ctl, true);
	if (rc) {
		pr_err("Failed to initialize ctl\n");
		goto error;
	}

	/* inherit the clock rate the bootloader left configured */
	ctl->clk_rate = mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false);
	pr_debug("Set the ctl clock rate to %d Hz\n", ctl->clk_rate);

	rc = __mdss_mdp_ctl_handoff(mfd, ctl, mdata);
	if (rc) {
		pr_err("primary ctl handoff failed. rc=%d\n", rc);
		goto error;
	}

	if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
		/* dual-display split also hands off the secondary path */
		sctl = mdss_mdp_get_split_ctl(ctl);
		if (!sctl) {
			pr_err("cannot get secondary ctl. fail the handoff\n");
			rc = -EPERM;
			goto error;
		}
		rc = __mdss_mdp_ctl_handoff(mfd, sctl, mdata);
		if (rc) {
			pr_err("secondary ctl handoff failed. rc=%d\n", rc);
			goto error;
		}
	}

	/* reclaim the shared memory pool allocations as well */
	rc = mdss_mdp_smp_handoff(mdata);
	if (rc)
		pr_err("Failed to handoff smps\n");

	mdp5_data->handoff = true;

error:
	/* on any failure, undo partial handoff so state stays consistent */
	if (rc && ctl) {
		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
		mdss_mdp_ctl_destroy(ctl);
		mdp5_data->ctl = NULL;
		mdp5_data->handoff = false;
	}

	return rc;
}
6308
6309static void __vsync_retire_handle_vsync(struct mdss_mdp_ctl *ctl, ktime_t t)
6310{
6311 struct msm_fb_data_type *mfd = ctl->mfd;
6312 struct mdss_overlay_private *mdp5_data;
6313
6314 if (!mfd || !mfd->mdp.private1) {
6315 pr_warn("Invalid handle for vsync\n");
6316 return;
6317 }
6318
6319 mdp5_data = mfd_to_mdp5_data(mfd);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05306320 kthread_queue_work(&mdp5_data->worker, &mdp5_data->vsync_work);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05306321}
6322
6323static void __vsync_retire_work_handler(struct kthread_work *work)
6324{
6325 struct mdss_overlay_private *mdp5_data =
6326 container_of(work, typeof(*mdp5_data), vsync_work);
6327
6328 if (!mdp5_data->ctl || !mdp5_data->ctl->mfd)
6329 return;
6330
6331 if (!mdp5_data->ctl->ops.remove_vsync_handler)
6332 return;
6333
6334 __vsync_retire_signal(mdp5_data->ctl->mfd, 1);
6335}
6336
/*
 * __vsync_retire_signal() - advance the retire sw_sync timeline
 * @mfd: framebuffer device whose retire timeline is signalled
 * @val: number of timeline steps to signal
 *
 * Serialized against fence creation (__vsync_retire_get_fence) by
 * mdp_sync_pt_data.sync_mutex. Once no retire fences remain pending,
 * the retire vsync handler is unregistered so no further retire work
 * is queued until a new fence is requested.
 */
static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);

	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
	if (mdp5_data->retire_cnt > 0) {
		/* step the timeline, then retire at most retire_cnt fences */
		sw_sync_timeline_inc(mdp5_data->vsync_timeline, val);
		mdp5_data->retire_cnt -= min(val, mdp5_data->retire_cnt);
		pr_debug("Retire signaled! timeline val=%d remaining=%d\n",
				mdp5_data->vsync_timeline->value,
				mdp5_data->retire_cnt);

		if (mdp5_data->retire_cnt == 0) {
			/* clocks must be on while touching the vsync HW path */
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
			mdp5_data->ctl->ops.remove_vsync_handler(mdp5_data->ctl,
					&mdp5_data->vsync_retire_handler);
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
		}
	}
	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
}
6358
6359static struct sync_fence *
6360__vsync_retire_get_fence(struct msm_sync_pt_data *sync_pt_data)
6361{
6362 struct msm_fb_data_type *mfd;
6363 struct mdss_overlay_private *mdp5_data;
6364 struct mdss_mdp_ctl *ctl;
6365 int value;
6366
6367 mfd = container_of(sync_pt_data, typeof(*mfd), mdp_sync_pt_data);
6368 mdp5_data = mfd_to_mdp5_data(mfd);
6369
6370 if (!mdp5_data || !mdp5_data->ctl)
6371 return ERR_PTR(-ENODEV);
6372
6373 ctl = mdp5_data->ctl;
6374 if (!ctl->ops.add_vsync_handler)
6375 return ERR_PTR(-EOPNOTSUPP);
6376
6377 if (!mdss_mdp_ctl_is_power_on(ctl)) {
6378 pr_debug("fb%d vsync pending first update\n", mfd->index);
6379 return ERR_PTR(-EPERM);
6380 }
6381
6382 value = mdp5_data->vsync_timeline->value + 1 + mdp5_data->retire_cnt;
6383 mdp5_data->retire_cnt++;
6384
6385 return mdss_fb_sync_get_fence(mdp5_data->vsync_timeline,
6386 "mdp-retire", value);
6387}
6388
6389static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd)
6390{
6391 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
6392 struct mdss_mdp_ctl *ctl;
6393 int rc;
6394 int retire_cnt;
6395
6396 ctl = mdp5_data->ctl;
6397 mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
6398 retire_cnt = mdp5_data->retire_cnt;
6399 mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
6400 if (!retire_cnt || mdp5_data->vsync_retire_handler.enabled)
6401 return 0;
6402
6403 if (!ctl->ops.add_vsync_handler)
6404 return -EOPNOTSUPP;
6405
6406 if (!mdss_mdp_ctl_is_power_on(ctl)) {
6407 pr_debug("fb%d vsync pending first update\n", mfd->index);
6408 return -EPERM;
6409 }
6410
6411 rc = ctl->ops.add_vsync_handler(ctl,
6412 &mdp5_data->vsync_retire_handler);
6413 return rc;
6414}
6415
6416static int __vsync_retire_setup(struct msm_fb_data_type *mfd)
6417{
6418 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
6419 char name[24];
6420 struct sched_param param = { .sched_priority = 5 };
6421
6422 snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
6423 mdp5_data->vsync_timeline = sw_sync_timeline_create(name);
6424 if (mdp5_data->vsync_timeline == NULL) {
6425 pr_err("cannot vsync create time line");
6426 return -ENOMEM;
6427 }
6428
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05306429 kthread_init_worker(&mdp5_data->worker);
6430 kthread_init_work(&mdp5_data->vsync_work, __vsync_retire_work_handler);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05306431
6432 mdp5_data->thread = kthread_run(kthread_worker_fn,
6433 &mdp5_data->worker,
6434 "vsync_retire_work");
6435
6436 if (IS_ERR(mdp5_data->thread)) {
6437 pr_err("unable to start vsync thread\n");
6438 mdp5_data->thread = NULL;
6439 return -ENOMEM;
6440 }
6441
6442 sched_setscheduler(mdp5_data->thread, SCHED_FIFO, &param);
6443
6444 mfd->mdp_sync_pt_data.get_retire_fence = __vsync_retire_get_fence;
6445
6446 mdp5_data->vsync_retire_handler.vsync_handler =
6447 __vsync_retire_handle_vsync;
6448 mdp5_data->vsync_retire_handler.cmd_post_flush = false;
6449
6450 return 0;
6451}
6452
/*
 * mdss_mdp_update_panel_info() - reconfigure the ctl for a panel mode switch
 * @mfd: framebuffer device being switched
 * @mode: requested mode; non-zero selects command mode, zero video mode
 *        (forwarded to the DSI driver via MDSS_EVENT_DSI_UPDATE_PANEL_DATA)
 * @dest_ctrl: non-zero destroys the current ctl so it is rebuilt from
 *             scratch for the new mode; zero reconfigures it in place
 *
 * Return: 0 on success (or when no ctl exists yet), negative errno on
 * failure of the split-display setup.
 */
static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
		int mode, int dest_ctrl)
{
	int ret = 0;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_panel_data *pdata;
	struct mdss_mdp_ctl *sctl;

	if (ctl == NULL) {
		pr_debug("ctl not initialized\n");
		return 0;
	}

	/* Ask the panel driver to switch its own state first. */
	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
			(void *)(unsigned long)mode, CTL_INTF_EVENT_FLAG_DEFAULT);
	if (ret)
		pr_err("Dynamic switch to %s mode failed!\n",
					mode ? "command" : "video");

	if (dest_ctrl) {
		/*
		 * Destroy current ctrl structure as this is
		 * going to be re-initialized with the requested mode.
		 */
		mdss_mdp_ctl_destroy(mdp5_data->ctl);
		mdp5_data->ctl = NULL;
	} else {
		pdata = dev_get_platdata(&mfd->pdev->dev);

		if (mdp5_data->mdata->has_pingpong_split &&
			pdata->panel_info.use_pingpong_split)
			mfd->split_mode = MDP_PINGPONG_SPLIT;
		/*
		 * Dynamic change so we need to reconfig instead of
		 * destroying current ctrl structure.
		 */
		mdss_mdp_ctl_reconfig(ctl, pdata);

		/*
		 * Set flag when dynamic resolution switch happens before
		 * handoff of cont-splash
		 */
		if (mdata->handoff_pending)
			ctl->switch_with_handoff = true;

		sctl = mdss_mdp_get_split_ctl(ctl);
		if (sctl) {
			if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
				/* keep the secondary ctl in sync with its panel */
				mdss_mdp_ctl_reconfig(sctl, pdata->next);
				sctl->border_x_off +=
					pdata->panel_info.lcdc.border_left +
					pdata->panel_info.lcdc.border_right;
			} else {
				/*
				 * todo: need to revisit this and properly
				 * cleanup slave resources
				 */
				mdss_mdp_ctl_destroy(sctl);
				ctl->mixer_right = NULL;
			}
		} else if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
			/* enable split display for the first time */
			ret = mdss_mdp_ctl_split_display_setup(ctl,
					pdata->next);
			if (ret) {
				mdss_mdp_ctl_destroy(ctl);
				mdp5_data->ctl = NULL;
			}
		}
	}

	return ret;
}
6528
6529int mdss_mdp_input_event_handler(struct msm_fb_data_type *mfd)
6530{
6531 int rc = 0;
6532 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
6533
6534 if (ctl && mdss_panel_is_power_on(ctl->power_state) &&
6535 ctl->ops.early_wake_up_fnc)
6536 rc = ctl->ops.early_wake_up_fnc(ctl);
6537
6538 return rc;
6539}
6540
/*
 * mdss_mdp_signal_retire_fence() - release pending retire fences
 * @mfd: framebuffer device whose retire timeline is advanced
 * @retire_cnt: number of timeline steps to signal
 *
 * Thin pass-through to __vsync_retire_signal(), exposed through the mdp5
 * interface table (mdp.signal_retire_fence) for the fb core.
 */
static void mdss_mdp_signal_retire_fence(struct msm_fb_data_type *mfd,
						int retire_cnt)
{
	__vsync_retire_signal(mfd, retire_cnt);
	pr_debug("Signaled (%d) pending retire fence\n", retire_cnt);
}
6547
6548int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
6549{
6550 struct device *dev = mfd->fbi->dev;
6551 struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
6552 struct mdss_overlay_private *mdp5_data = NULL;
6553 struct irq_info *mdss_irq;
6554 int rc;
6555
6556 mdp5_data = kcalloc(1, sizeof(struct mdss_overlay_private), GFP_KERNEL);
6557 if (!mdp5_data)
6558 return -ENOMEM;
6559
6560 mdp5_data->frc_fsm
6561 = kcalloc(1, sizeof(struct mdss_mdp_frc_fsm), GFP_KERNEL);
6562 if (!mdp5_data->frc_fsm) {
6563 rc = -ENOMEM;
6564 pr_err("fail to allocate mdp5 frc fsm structure\n");
6565 goto init_fail1;
6566 }
6567
6568 mdp5_data->mdata = dev_get_drvdata(mfd->pdev->dev.parent);
6569 if (!mdp5_data->mdata) {
6570 pr_err("unable to initialize overlay for fb%d\n", mfd->index);
6571 rc = -ENODEV;
6572 goto init_fail;
6573 }
6574
6575 mdp5_interface->on_fnc = mdss_mdp_overlay_on;
6576 mdp5_interface->off_fnc = mdss_mdp_overlay_off;
6577 mdp5_interface->release_fnc = __mdss_mdp_overlay_release_all;
6578 mdp5_interface->do_histogram = NULL;
6579 if (mdp5_data->mdata->ncursor_pipes)
6580 mdp5_interface->cursor_update = mdss_mdp_hw_cursor_pipe_update;
6581 else
6582 mdp5_interface->cursor_update = mdss_mdp_hw_cursor_update;
6583 mdp5_interface->async_position_update =
6584 mdss_mdp_async_position_update;
6585 mdp5_interface->dma_fnc = mdss_mdp_overlay_pan_display;
6586 mdp5_interface->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
6587 mdp5_interface->kickoff_fnc = mdss_mdp_overlay_kickoff;
6588 mdp5_interface->mode_switch = mdss_mode_switch;
6589 mdp5_interface->mode_switch_post = mdss_mode_switch_post;
6590 mdp5_interface->pre_commit_fnc = mdss_mdp_overlay_precommit;
6591 mdp5_interface->splash_init_fnc = mdss_mdp_splash_init;
6592 mdp5_interface->configure_panel = mdss_mdp_update_panel_info;
6593 mdp5_interface->input_event_handler = mdss_mdp_input_event_handler;
6594 mdp5_interface->signal_retire_fence = mdss_mdp_signal_retire_fence;
6595
6596 if (mfd->panel_info->type == WRITEBACK_PANEL) {
6597 mdp5_interface->atomic_validate =
6598 mdss_mdp_layer_atomic_validate_wfd;
6599 mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit_wfd;
6600 mdp5_interface->is_config_same = mdss_mdp_wfd_is_config_same;
6601 } else {
6602 mdp5_interface->atomic_validate =
6603 mdss_mdp_layer_atomic_validate;
6604 mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit;
6605 }
6606
6607 INIT_LIST_HEAD(&mdp5_data->pipes_used);
6608 INIT_LIST_HEAD(&mdp5_data->pipes_cleanup);
6609 INIT_LIST_HEAD(&mdp5_data->pipes_destroy);
6610 INIT_LIST_HEAD(&mdp5_data->bufs_pool);
6611 INIT_LIST_HEAD(&mdp5_data->bufs_chunks);
6612 INIT_LIST_HEAD(&mdp5_data->bufs_used);
6613 INIT_LIST_HEAD(&mdp5_data->bufs_freelist);
6614 INIT_LIST_HEAD(&mdp5_data->rot_proc_list);
6615 mutex_init(&mdp5_data->list_lock);
6616 mutex_init(&mdp5_data->ov_lock);
6617 mutex_init(&mdp5_data->dfps_lock);
6618 mdp5_data->hw_refresh = true;
6619 mdp5_data->cursor_ndx[CURSOR_PIPE_LEFT] = MSMFB_NEW_REQUEST;
6620 mdp5_data->cursor_ndx[CURSOR_PIPE_RIGHT] = MSMFB_NEW_REQUEST;
6621 mdp5_data->allow_kickoff = false;
6622
6623 mfd->mdp.private1 = mdp5_data;
6624 mfd->wait_for_kickoff = true;
6625
6626 rc = mdss_mdp_overlay_fb_parse_dt(mfd);
6627 if (rc)
6628 return rc;
6629
6630 /*
6631 * disable BWC if primary panel is video mode on specific
6632 * chipsets to workaround HW problem.
6633 */
6634 if (mdss_has_quirk(mdp5_data->mdata, MDSS_QUIRK_BWCPANIC) &&
6635 mfd->panel_info->type == MIPI_VIDEO_PANEL && (mfd->index == 0))
6636 mdp5_data->mdata->has_bwc = false;
6637
6638 mfd->panel_orientation = mfd->panel_info->panel_orientation;
6639
6640 if ((mfd->panel_info->panel_orientation & MDP_FLIP_LR) &&
6641 (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY))
6642 mdp5_data->mixer_swap = true;
6643
6644 rc = sysfs_create_group(&dev->kobj, &mdp_overlay_sysfs_group);
6645 if (rc) {
6646 pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
6647 goto init_fail;
6648 }
6649
6650 mdp5_data->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
6651 "vsync_event");
6652 if (!mdp5_data->vsync_event_sd) {
6653 pr_err("vsync_event sysfs lookup failed\n");
6654 rc = -ENODEV;
6655 goto init_fail;
6656 }
6657
6658 mdp5_data->lineptr_event_sd = sysfs_get_dirent(dev->kobj.sd,
6659 "lineptr_event");
6660 if (!mdp5_data->lineptr_event_sd) {
6661 pr_err("lineptr_event sysfs lookup failed\n");
6662 rc = -ENODEV;
6663 goto init_fail;
6664 }
6665
6666 mdp5_data->hist_event_sd = sysfs_get_dirent(dev->kobj.sd,
6667 "hist_event");
6668 if (!mdp5_data->hist_event_sd) {
6669 pr_err("hist_event sysfs lookup failed\n");
6670 rc = -ENODEV;
6671 goto init_fail;
6672 }
6673
6674 mdp5_data->bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
6675 "bl_event");
6676 if (!mdp5_data->bl_event_sd) {
6677 pr_err("bl_event sysfs lookup failed\n");
6678 rc = -ENODEV;
6679 goto init_fail;
6680 }
6681
6682 mdp5_data->ad_event_sd = sysfs_get_dirent(dev->kobj.sd,
6683 "ad_event");
6684 if (!mdp5_data->ad_event_sd) {
6685 pr_err("ad_event sysfs lookup failed\n");
6686 rc = -ENODEV;
6687 goto init_fail;
6688 }
6689
6690 mdp5_data->ad_bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
6691 "ad_bl_event");
6692 if (!mdp5_data->ad_bl_event_sd) {
6693 pr_err("ad_bl_event sysfs lookup failed\n");
6694 rc = -ENODEV;
6695 goto init_fail;
6696 }
6697
6698 rc = sysfs_create_link_nowarn(&dev->kobj,
6699 &mdp5_data->mdata->pdev->dev.kobj, "mdp");
6700 if (rc)
6701 pr_warn("problem creating link to mdp sysfs\n");
6702
6703 rc = sysfs_create_link_nowarn(&dev->kobj,
6704 &mfd->pdev->dev.kobj, "mdss_fb");
6705 if (rc)
6706 pr_warn("problem creating link to mdss_fb sysfs\n");
6707
6708 if (mfd->panel_info->type == MIPI_VIDEO_PANEL ||
6709 mfd->panel_info->type == DTV_PANEL) {
6710 rc = sysfs_create_group(&dev->kobj,
6711 &dynamic_fps_fs_attrs_group);
6712 if (rc) {
6713 pr_err("Error dfps sysfs creation ret=%d\n", rc);
6714 goto init_fail;
6715 }
6716 }
6717
6718 if (mfd->panel_info->mipi.dms_mode ||
6719 mfd->panel_info->type == MIPI_CMD_PANEL) {
6720 rc = __vsync_retire_setup(mfd);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05306721 if (IS_ERR_VALUE((unsigned long)rc)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05306722 pr_err("unable to create vsync timeline\n");
6723 goto init_fail;
6724 }
6725 }
6726 mfd->mdp_sync_pt_data.async_wait_fences = true;
6727
6728 pm_runtime_set_suspended(&mfd->pdev->dev);
6729 pm_runtime_enable(&mfd->pdev->dev);
6730
6731 kobject_uevent(&dev->kobj, KOBJ_ADD);
6732 pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
6733
6734 mdss_irq = mdss_intr_line();
6735
6736 /* Adding event timer only for primary panel */
6737 if ((mfd->index == 0) && (mfd->panel_info->type != WRITEBACK_PANEL)) {
6738 mdp5_data->cpu_pm_hdl = add_event_timer(mdss_irq->irq,
6739 mdss_mdp_ctl_event_timer, (void *)mdp5_data);
6740 if (!mdp5_data->cpu_pm_hdl)
6741 pr_warn("%s: unable to add event timer\n", __func__);
6742 }
6743
6744 if (mfd->panel_info->cont_splash_enabled) {
6745 rc = mdss_mdp_overlay_handoff(mfd);
6746 if (rc) {
6747 /*
6748 * Even though handoff failed, it is not fatal.
6749 * MDP can continue, just that we would have a longer
6750 * delay in transitioning from splash screen to boot
6751 * animation
6752 */
6753 pr_warn("Overlay handoff failed for fb%d. rc=%d\n",
6754 mfd->index, rc);
6755 rc = 0;
6756 }
6757 }
6758 mdp5_data->dyn_pu_state = mfd->panel_info->partial_update_enabled;
6759
6760 if (mdss_mdp_pp_overlay_init(mfd))
6761 pr_warn("Failed to initialize pp overlay data.\n");
6762 return rc;
6763init_fail:
6764 kfree(mdp5_data->frc_fsm);
6765init_fail1:
6766 kfree(mdp5_data);
6767 return rc;
6768}
6769
6770static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
6771{
6772 int rc = 0;
6773 struct platform_device *pdev = mfd->pdev;
6774 struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
6775
6776 mdp5_mdata->mixer_swap = of_property_read_bool(pdev->dev.of_node,
6777 "qcom,mdss-mixer-swap");
6778 if (mdp5_mdata->mixer_swap) {
6779 pr_info("mixer swap is enabled for fb device=%s\n",
6780 pdev->name);
6781 }
6782
6783 return rc;
6784}
6785
6786static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
6787 struct mdp_scale_luts_info *lut_tbl)
6788{
6789 struct mdss_mdp_qseed3_lut_tbl *qseed3_lut_tbl;
6790 int ret;
6791
6792 if (!mdata->scaler_off)
6793 return -EFAULT;
6794
6795 qseed3_lut_tbl = &mdata->scaler_off->lut_tbl;
6796 if ((lut_tbl->dir_lut_size !=
6797 DIR_LUT_IDX * DIR_LUT_COEFFS * sizeof(uint32_t)) ||
6798 (lut_tbl->cir_lut_size !=
6799 CIR_LUT_IDX * CIR_LUT_COEFFS * sizeof(uint32_t)) ||
6800 (lut_tbl->sep_lut_size !=
6801 SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t)))
6802 return -EINVAL;
6803
6804 if (!qseed3_lut_tbl->dir_lut) {
6805 qseed3_lut_tbl->dir_lut = devm_kzalloc(&mdata->pdev->dev,
6806 lut_tbl->dir_lut_size,
6807 GFP_KERNEL);
6808 if (!qseed3_lut_tbl->dir_lut) {
6809 ret = -ENOMEM;
6810 goto fail;
6811 }
6812 }
6813
6814 if (!qseed3_lut_tbl->cir_lut) {
6815 qseed3_lut_tbl->cir_lut = devm_kzalloc(&mdata->pdev->dev,
6816 lut_tbl->cir_lut_size,
6817 GFP_KERNEL);
6818 if (!qseed3_lut_tbl->cir_lut) {
6819 ret = -ENOMEM;
6820 goto fail;
6821 }
6822 }
6823
6824 if (!qseed3_lut_tbl->sep_lut) {
6825 qseed3_lut_tbl->sep_lut = devm_kzalloc(&mdata->pdev->dev,
6826 lut_tbl->sep_lut_size,
6827 GFP_KERNEL);
6828 if (!qseed3_lut_tbl->sep_lut) {
6829 ret = -ENOMEM;
6830 goto fail;
6831 }
6832 }
6833
6834 /* Invalidate before updating */
6835 qseed3_lut_tbl->valid = false;
6836
6837
6838 if (copy_from_user(qseed3_lut_tbl->dir_lut,
6839 (void *)(unsigned long)lut_tbl->dir_lut,
6840 lut_tbl->dir_lut_size)) {
6841 ret = -EINVAL;
6842 goto err;
6843 }
6844
6845 if (copy_from_user(qseed3_lut_tbl->cir_lut,
6846 (void *)(unsigned long)lut_tbl->cir_lut,
6847 lut_tbl->cir_lut_size)) {
6848 ret = -EINVAL;
6849 goto err;
6850 }
6851
6852 if (copy_from_user(qseed3_lut_tbl->sep_lut,
6853 (void *)(unsigned long)lut_tbl->sep_lut,
6854 lut_tbl->sep_lut_size)) {
6855 ret = -EINVAL;
6856 goto err;
6857 }
6858
6859 qseed3_lut_tbl->valid = true;
6860 return ret;
6861
6862fail:
6863 kfree(qseed3_lut_tbl->dir_lut);
6864 kfree(qseed3_lut_tbl->cir_lut);
6865 kfree(qseed3_lut_tbl->sep_lut);
6866err:
6867 qseed3_lut_tbl->valid = false;
6868 return ret;
6869}
6870
6871static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
6872 struct mdp_set_cfg *cfg)
6873{
6874 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
6875 int ret = -EINVAL;
6876 struct mdp_scale_luts_info luts_info;
6877
6878 switch (cfg->flags) {
6879 case MDP_QSEED3_LUT_CFG:
6880 if (cfg->len != sizeof(luts_info)) {
6881 pr_err("invalid length %d expected %zd\n", cfg->len,
6882 sizeof(luts_info));
6883 ret = -EINVAL;
6884 break;
6885 }
6886 ret = copy_from_user(&luts_info,
6887 (void *)(unsigned long)cfg->payload, cfg->len);
6888 if (ret) {
6889 pr_err("qseed3 lut copy failed ret %d\n", ret);
6890 ret = -EFAULT;
6891 break;
6892 }
6893 ret = mdss_mdp_scaler_lut_init(mdata, &luts_info);
6894 break;
6895 default:
6896 break;
6897 }
6898 return ret;
6899}