blob: 80708aa627f4c8916f1dbfc696d345cad01f3454 [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/dma-buf.h>
17#include <linux/dma-mapping.h>
18#include <linux/errno.h>
19#include <linux/kernel.h>
20#include <linux/major.h>
21#include <linux/module.h>
22#include <linux/pm_runtime.h>
23#include <linux/uaccess.h>
24#include <linux/delay.h>
25#include <linux/msm_mdp.h>
26#include <linux/memblock.h>
27#include <linux/sort.h>
Sachin Bhayareeeb88892018-01-02 16:36:01 +053028#include <linux/kmemleak.h>
Sachin Bhayare3d3767e2018-01-02 21:10:57 +053029#include <linux/kthread.h>
Sachin Bhayareeeb88892018-01-02 16:36:01 +053030#include <asm/div64.h>
31
32#include <soc/qcom/event_timer.h>
33#include <linux/msm-bus.h>
34#include "mdss.h"
35#include "mdss_debug.h"
36#include "mdss_fb.h"
37#include "mdss_mdp.h"
38#include "mdss_smmu.h"
39#include "mdss_mdp_wfd.h"
40#include "mdss_dsi_clk.h"
Sachin Bhayare2b6d0042018-01-13 19:38:21 +053041#include "mdss_sync.h"
Sachin Bhayareeeb88892018-01-02 16:36:01 +053042
/* Nominal vsync period (presumably ms at ~60 fps) — TODO confirm units. */
#define VSYNC_PERIOD 16
/* Sentinel overlay index identifying a borderfill (source-less) layer. */
#define BORDERFILL_NDX 0x0BF000BF
/*
 * True when [offset, offset + size) does not fit within max_size.
 * Written as a subtraction on the right side to avoid overflow in
 * an (offset + size) addition.
 */
#define CHECK_BOUNDS(offset, size, max_size) \
	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))

/*
 * Overlay targets the right layer mixer, either explicitly via the
 * RIGHT_MIXER flag or implicitly by its destination x position.
 */
#define IS_RIGHT_MIXER_OV(flags, dst_x, left_lm_w) \
	((flags & MDSS_MDP_RIGHT_MIXER) || (dst_x >= left_lm_w))

/* Number of mdss_mdp_data entries allocated per buffer-pool chunk. */
#define BUF_POOL_SIZE 32

/* Upper bounds used to validate dynamic-fps (DFPS) data — presumably
 * sanity limits on userspace-provided timing values; verify at callers.
 */
#define DFPS_DATA_MAX_HFP 8192
#define DFPS_DATA_MAX_HBP 8192
#define DFPS_DATA_MAX_HPW 8192
#define DFPS_DATA_MAX_FPS 0x7fffffff
#define DFPS_DATA_MAX_CLK_RATE 250000
58
59static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
60static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
61static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
62static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
63static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
64static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd);
65static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
66 int mode, int dest_ctrl);
67static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
68 struct mdp_set_cfg *cfg);
69
70static inline bool is_ov_right_blend(struct mdp_rect *left_blend,
71 struct mdp_rect *right_blend, u32 left_lm_w)
72{
73 return (((left_blend->x + left_blend->w) == right_blend->x) &&
74 ((left_blend->x + left_blend->w) != left_lm_w) &&
75 (left_blend->x != right_blend->x) &&
76 (left_blend->y == right_blend->y) &&
77 (left_blend->h == right_blend->h));
78}
79
80/**
81 * __is_more_decimation_doable() -
82 * @pipe: pointer to pipe data structure
83 *
84 * if per pipe BW exceeds the limit and user
85 * has not requested decimation then return
86 * -E2BIG error back to user else try more
87 * decimation based on following table config.
88 *
89 * ----------------------------------------------------------
90 * error | split mode | src_split | v_deci | action |
91 * ------|------------|-----------|--------|----------------|
92 * | | | 00 | return error |
93 * | | enabled |--------|----------------|
 * | | | >1 | more decimation |
95 * | yes |-----------|--------|----------------|
96 * | | | 00 | return error |
97 * | | disabled |--------|----------------|
98 * | | | >1 | return error |
99 * E2BIG |------------|-----------|--------|----------------|
100 * | | | 00 | return error |
101 * | | enabled |--------|----------------|
 * | | | >1 | more decimation |
103 * | no |-----------|--------|----------------|
104 * | | | 00 | return error |
105 * | | disabled |--------|----------------|
 * | | | >1 | more decimation |
107 * ----------------------------------------------------------
108 */
109static inline bool __is_more_decimation_doable(struct mdss_mdp_pipe *pipe)
110{
111 struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
112 struct msm_fb_data_type *mfd = pipe->mixer_left->ctl->mfd;
113
114 if (!mfd->split_mode && !pipe->vert_deci)
115 return false;
116 else if (mfd->split_mode && (!mdata->has_src_split ||
117 (mdata->has_src_split && !pipe->vert_deci)))
118 return false;
119 else
120 return true;
121}
122
123static struct mdss_mdp_pipe *__overlay_find_pipe(
124 struct msm_fb_data_type *mfd, u32 ndx)
125{
126 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
127 struct mdss_mdp_pipe *tmp, *pipe = NULL;
128
129 mutex_lock(&mdp5_data->list_lock);
130 list_for_each_entry(tmp, &mdp5_data->pipes_used, list) {
131 if (tmp->ndx == ndx) {
132 pipe = tmp;
133 break;
134 }
135 }
136 mutex_unlock(&mdp5_data->list_lock);
137
138 return pipe;
139}
140
141static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
142 struct mdp_overlay *req)
143{
144 struct mdss_mdp_pipe *pipe;
145
146 pipe = __overlay_find_pipe(mfd, req->id);
147 if (!pipe) {
148 pr_err("invalid pipe ndx=%x\n", req->id);
149 return pipe ? PTR_ERR(pipe) : -ENODEV;
150 }
151
152 *req = pipe->req_data;
153
154 return 0;
155}
156
/*
 * mdss_mdp_ov_xres_check() - validate (and normalize) the horizontal
 * placement of an overlay request.
 * @mfd: target framebuffer device
 * @req: overlay request; dst_rect.x and flags may be rewritten in place
 *       to translate between legacy RIGHT_MIXER addressing and
 *       source-split (full-panel) addressing.
 *
 * Computes the horizontal extent (xres) the destination rect must fit in,
 * based on which mixer(s) the overlay lands on, then bounds-checks
 * dst_rect against it. Returns 0, -EPERM if the required mixer does not
 * exist, or -EOVERFLOW if the rect exceeds the extent.
 */
static int mdss_mdp_ov_xres_check(struct msm_fb_data_type *mfd,
	struct mdp_overlay *req)
{
	u32 xres = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w)) {
		if (mdata->has_src_split) {
			xres = left_lm_w;

			if (req->flags & MDSS_MDP_RIGHT_MIXER) {
				pr_warn("invalid use of RIGHT_MIXER flag.\n");
				/*
				 * if chip-set is capable of source split then
				 * all layers which are only on right LM should
				 * have their x offset relative to left LM's
				 * left-top or in other words relative to
				 * panel width.
				 * By modifying dst_x below, we are assuming
				 * that client is running in legacy mode
				 * chipset capable of source split.
				 */
				if (req->dst_rect.x < left_lm_w)
					req->dst_rect.x += left_lm_w;

				req->flags &= ~MDSS_MDP_RIGHT_MIXER;
			}
		} else if (req->dst_rect.x >= left_lm_w) {
			/*
			 * this is a step towards removing a reliance on
			 * MDSS_MDP_RIGHT_MIXER flags. With the new src split
			 * code, some clients of non-src-split chipsets have
			 * stopped sending MDSS_MDP_RIGHT_MIXER flag and
			 * modified their xres relative to full panel
			 * dimensions. In such cases, we need to deduct left
			 * layer mixer width before we program this HW.
			 */
			req->dst_rect.x -= left_lm_w;
			req->flags |= MDSS_MDP_RIGHT_MIXER;
		}

		/* right-landing overlays require a right mixer to exist */
		if (ctl->mixer_right) {
			xres += ctl->mixer_right->width;
		} else {
			pr_err("ov cannot be placed on right mixer\n");
			return -EPERM;
		}
	} else {
		if (ctl->mixer_left) {
			xres = ctl->mixer_left->width;
		} else {
			pr_err("ov cannot be placed on left mixer\n");
			return -EPERM;
		}

		/* with src split a left-start overlay may span both LMs */
		if (mdata->has_src_split && ctl->mixer_right)
			xres += ctl->mixer_right->width;
	}

	if (CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres)) {
		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
			req->dst_rect.x, req->dst_rect.w, xres);
		return -EOVERFLOW;
	}

	return 0;
}
226
/*
 * mdss_mdp_overlay_req_check() - validate an overlay request against the
 * capabilities and limits of this MDP target.
 * @mfd: target framebuffer device
 * @req: overlay request from userspace
 * @fmt: resolved format parameters for req->src.format
 *
 * Checks, in order: secure-session restrictions, z-order range, cursor
 * pipe constraints, source/destination rectangle bounds and minimum
 * sizes, decimation limits, up/down scaling ratios, BWC and deinterlace
 * restrictions, and YUV even-alignment requirements.
 *
 * Return: 0 if acceptable; -EPERM, -ERANGE, -EINVAL or -EOVERFLOW
 * describing the first violation found.
 */
int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
			       struct mdp_overlay *req,
			       struct mdss_mdp_format_params *fmt)
{
	u32 yres;
	u32 min_src_size, min_dst_size;
	int content_secure;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	yres = mfd->fbi->var.yres;

	/* secure content may not go through a non-secure writeback session */
	content_secure = (req->flags & MDP_SECURE_OVERLAY_SESSION);
	if (!ctl->is_secure && content_secure &&
			(mfd->panel.type == WRITEBACK_PANEL)) {
		pr_debug("return due to security concerns\n");
		return -EPERM;
	}
	/* older MDP revisions need larger minimum source/dest dimensions */
	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
		min_src_size = fmt->is_yuv ? 2 : 1;
		min_dst_size = 1;
	} else {
		min_src_size = fmt->is_yuv ? 10 : 5;
		min_dst_size = 2;
	}

	if (req->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
		pr_err("zorder %d out of range\n", req->z_order);
		return -ERANGE;
	}

	/*
	 * Cursor overlays are only supported for targets
	 * with dedicated cursors within VP
	 */
	if ((req->pipe_type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
		((req->z_order != HW_CURSOR_STAGE(mdata)) ||
			!mdata->ncursor_pipes ||
			(req->src_rect.w > mdata->max_cursor_size))) {
		pr_err("Incorrect cursor overlay cursor_pipes=%d zorder=%d\n",
			mdata->ncursor_pipes, req->z_order);
		return -EINVAL;
	}

	/* source rect must lie fully within the source image */
	if (req->src.width > MAX_IMG_WIDTH ||
	    req->src.height > MAX_IMG_HEIGHT ||
	    req->src_rect.w < min_src_size || req->src_rect.h < min_src_size ||
	    CHECK_BOUNDS(req->src_rect.x, req->src_rect.w, req->src.width) ||
	    CHECK_BOUNDS(req->src_rect.y, req->src_rect.h, req->src.height)) {
		pr_err("invalid source image img wh=%dx%d rect=%d,%d,%d,%d\n",
		       req->src.width, req->src.height,
		       req->src_rect.x, req->src_rect.y,
		       req->src_rect.w, req->src_rect.h);
		return -EOVERFLOW;
	}

	if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size) {
		pr_err("invalid destination resolution (%dx%d)",
		       req->dst_rect.w, req->dst_rect.h);
		return -EOVERFLOW;
	}

	/* decimation needs HW support and is exclusive with BWC/tiling */
	if (req->horz_deci || req->vert_deci) {
		if (!mdata->has_decimation) {
			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
			return -EINVAL;
		} else if ((req->horz_deci > MAX_DECIMATION) ||
				(req->vert_deci > MAX_DECIMATION)) {
			pr_err("Invalid decimation factors horz=%d vert=%d\n",
					req->horz_deci, req->vert_deci);
			return -EINVAL;
		} else if (req->flags & MDP_BWC_EN) {
			pr_err("Decimation can't be enabled with BWC\n");
			return -EINVAL;
		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
			pr_err("Decimation can't be enabled with MacroTile format\n");
			return -EINVAL;
		}
	}

	/* scaling/placement checks are skipped for rotation-only sessions */
	if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
		u32 src_w, src_h, dst_w, dst_h;

		if (CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres)) {
			pr_err("invalid vertical destination: y=%d, h=%d\n",
				req->dst_rect.y, req->dst_rect.h);
			return -EOVERFLOW;
		}

		/* with 90-degree rotation dst w/h swap for ratio checks */
		if (req->flags & MDP_ROT_90) {
			dst_h = req->dst_rect.w;
			dst_w = req->dst_rect.h;
		} else {
			dst_w = req->dst_rect.w;
			dst_h = req->dst_rect.h;
		}

		src_w = DECIMATED_DIMENSION(req->src_rect.w, req->horz_deci);
		src_h = DECIMATED_DIMENSION(req->src_rect.h, req->vert_deci);

		if (src_w > mdata->max_pipe_width) {
			pr_err("invalid source width=%d HDec=%d\n",
					req->src_rect.w, req->horz_deci);
			return -EINVAL;
		}

		if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
			pr_err("too much upscaling Width %d->%d\n",
			       req->src_rect.w, req->dst_rect.w);
			return -EINVAL;
		}

		if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
			pr_err("too much upscaling. Height %d->%d\n",
			       req->src_rect.h, req->dst_rect.h);
			return -EINVAL;
		}

		if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
			pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
			       src_w, req->dst_rect.w, req->horz_deci);
			return -EINVAL;
		}

		if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
			pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
			       src_h, req->dst_rect.h, req->vert_deci);
			return -EINVAL;
		}

		/* BWC requires full-frame fetch and no decimation */
		if (req->flags & MDP_BWC_EN) {
			if ((req->src.width != req->src_rect.w) ||
			    (req->src.height != req->src_rect.h)) {
				pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
					req->src.width, req->src.height,
					req->src_rect.w, req->src_rect.h);
				return -EINVAL;
			}

			if ((req->flags & MDP_DECIMATION_EN) ||
					req->vert_deci || req->horz_deci) {
				pr_err("Can't enable BWC and decimation\n");
				return -EINVAL;
			}
		}

		/* deinterlacing needs the interlaced dimension to be 4-aligned */
		if ((req->flags & MDP_DEINTERLACE) &&
					!req->scale.enable_pxl_ext) {
			if (req->flags & MDP_SOURCE_ROTATED_90) {
				if ((req->src_rect.w % 4) != 0) {
					pr_err("interlaced rect not h/4\n");
					return -EINVAL;
				}
			} else if ((req->src_rect.h % 4) != 0) {
				pr_err("interlaced rect not h/4\n");
				return -EINVAL;
			}
		}
	} else {
		if (req->flags & MDP_DEINTERLACE) {
			if ((req->src_rect.h % 4) != 0) {
				pr_err("interlaced rect h not multiple of 4\n");
				return -EINVAL;
			}
		}
	}

	/* YUV sources must have even offsets and dimensions */
	if (fmt->is_yuv) {
		if ((req->src_rect.x & 0x1) || (req->src_rect.y & 0x1) ||
		    (req->src_rect.w & 0x1) || (req->src_rect.h & 0x1)) {
			pr_err("invalid odd src resolution or coordinates\n");
			return -EINVAL;
		}
	}

	return 0;
}
404
/*
 * mdp_pipe_tune_perf() - verify the pipe's clock/bandwidth needs can be
 * satisfied, escalating vertical decimation if necessary.
 * @pipe: pipe whose performance requirements are evaluated; on success
 *        pipe->vert_deci may have been increased as a side effect
 * @flags: PERF_CALC_PIPE_* flags; clock-fudge and SMP-size calculation
 *         are always added here
 *
 * Loops: recompute performance, and if either the required MDP clock
 * exceeds the maximum or the per-pipe bandwidth check fails, bump
 * vert_deci (when decimation is possible for this pipe/format) and try
 * again. Return: 0 on success, -E2BIG (or the BW-check error) when no
 * further decimation can help.
 */
int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
	u32 flags)
{
	struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
	struct mdss_mdp_perf_params perf;
	int rc;

	memset(&perf, 0, sizeof(perf));

	flags |= PERF_CALC_PIPE_APPLY_CLK_FUDGE |
		PERF_CALC_PIPE_CALC_SMP_SIZE;

	for (;;) {
		rc = mdss_mdp_perf_calc_pipe(pipe, &perf, NULL,
			flags);

		if (!rc && (perf.mdp_clk_rate <= mdata->max_mdp_clk_rate)) {
			rc = mdss_mdp_perf_bw_check_pipe(&perf, pipe);
			if (!rc) {
				/* both clock and bandwidth fit: done */
				break;
			} else if (rc == -E2BIG &&
				   !__is_more_decimation_doable(pipe)) {
				pr_debug("pipe%d exceeded per pipe BW\n",
					pipe->num);
				return rc;
			}
		}

		/*
		 * if decimation is available try to reduce minimum clock rate
		 * requirement by applying vertical decimation and reduce
		 * mdp clock requirement
		 */
		if (mdata->has_decimation && (pipe->vert_deci < MAX_DECIMATION)
			&& !pipe->bwc_mode && !pipe->scaler.enable &&
			mdss_mdp_is_linear_format(pipe->src_fmt))
			pipe->vert_deci++;
		else
			return -E2BIG;
	}

	return 0;
}
448
/*
 * __mdss_mdp_validate_pxl_extn() - sanity-check userspace-provided pixel
 * extension values against the pipe's (decimated) source geometry, per
 * color plane.
 *
 * For each plane the requested horizontal/vertical pixels (ROI plus
 * extension pixels) must exactly match what the HW will fetch (source
 * plus fetch/repeat pixels), and the overfetch must stay within the
 * source image. On any mismatch the scaler is disabled and -EINVAL is
 * returned; 0 on success.
 */
static int __mdss_mdp_validate_pxl_extn(struct mdss_mdp_pipe *pipe)
{
	int plane;

	for (plane = 0; plane < MAX_PLANES; plane++) {
		u32 hor_req_pixels, hor_fetch_pixels;
		u32 hor_ov_fetch, vert_ov_fetch;
		u32 vert_req_pixels, vert_fetch_pixels;
		u32 src_w = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
		u32 src_h = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);

		/*
		 * plane 1 and 2 are for chroma and are same. While configuring
		 * HW, programming only one of the chroma components is
		 * sufficient.
		 */
		if (plane == 2)
			continue;

		/*
		 * For chroma plane, width is half for the following sub sampled
		 * formats. Except in case of decimation, where hardware avoids
		 * 1 line of decimation instead of downsampling.
		 */
		if (plane == 1 && !pipe->horz_deci &&
		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1))) {
			src_w >>= 1;
		}

		if (plane == 1 && !pipe->vert_deci &&
		    ((pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420) ||
		     (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)))
			src_h >>= 1;

		/* pixels the scaler requires horizontally: ROI + extensions */
		hor_req_pixels = pipe->scaler.roi_w[plane] +
			pipe->scaler.num_ext_pxls_left[plane] +
			pipe->scaler.num_ext_pxls_right[plane];

		/* pixels the HW will fetch: src + fetch (decimated) + repeat */
		hor_fetch_pixels = src_w +
			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci) +
			pipe->scaler.left_rpt[plane] +
			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci) +
			pipe->scaler.right_rpt[plane];

		hor_ov_fetch = src_w +
			(pipe->scaler.left_ftch[plane] >> pipe->horz_deci)+
			(pipe->scaler.right_ftch[plane] >> pipe->horz_deci);

		vert_req_pixels = pipe->scaler.num_ext_pxls_top[plane] +
			pipe->scaler.num_ext_pxls_btm[plane];

		vert_fetch_pixels =
			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci) +
			pipe->scaler.top_rpt[plane] +
			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci)+
			pipe->scaler.btm_rpt[plane];

		vert_ov_fetch = src_h +
			(pipe->scaler.top_ftch[plane] >> pipe->vert_deci)+
			(pipe->scaler.btm_ftch[plane] >> pipe->vert_deci);

		/* reject if requested and fetched pixels disagree, or the
		 * overfetch extends past the source image bounds
		 */
		if ((hor_req_pixels != hor_fetch_pixels) ||
			(hor_ov_fetch > pipe->img_width) ||
			(vert_req_pixels != vert_fetch_pixels) ||
			(vert_ov_fetch > pipe->img_height)) {
			pr_err("err: plane=%d h_req:%d h_fetch:%d v_req:%d v_fetch:%d\n",
					plane,
					hor_req_pixels, hor_fetch_pixels,
					vert_req_pixels, vert_fetch_pixels);
			pr_err("roi_w[%d]=%d, src_img:[%d, %d]\n",
					plane, pipe->scaler.roi_w[plane],
					pipe->img_width, pipe->img_height);
			pipe->scaler.enable = 0;
			return -EINVAL;
		}
	}

	return 0;
}
529
/*
 * mdss_mdp_overlay_setup_scaling() - establish scaler state for a pipe.
 *
 * If userspace supplied scaler data (pipe->scaler.enable), only validate
 * its pixel-extension values on non-QSEED3 hardware and return.
 * Otherwise clear the scaler state and derive horizontal and vertical
 * phase steps from the decimated source and the destination sizes, then
 * compute QSEED3 config or pixel extensions depending on HW capability.
 *
 * Return: 0 on success; -ECANCELED when vertical overflow should fall
 * back to GPU composition; other negative errno on calculation failure.
 * Horizontal overflow, and vertical overflow on VIG (Qseed2) pipes, are
 * tolerated and reported as success.
 */
int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
{
	u32 src;
	int rc = 0;
	struct mdss_data_type *mdata;

	mdata = mdss_mdp_get_mdata();
	if (pipe->scaler.enable) {
		if (!test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
			rc = __mdss_mdp_validate_pxl_extn(pipe);
		return rc;
	}

	memset(&pipe->scaler, 0, sizeof(struct mdp_scale_data_v2));
	src = DECIMATED_DIMENSION(pipe->src.w, pipe->horz_deci);
	rc = mdss_mdp_calc_phase_step(src, pipe->dst.w,
			&pipe->scaler.phase_step_x[0]);
	if (rc == -EOVERFLOW) {
		/* overflow on horizontal direction is acceptable */
		rc = 0;
	} else if (rc) {
		pr_err("Horizontal scaling calculation failed=%d! %d->%d\n",
				rc, src, pipe->dst.w);
		return rc;
	}

	src = DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci);
	rc = mdss_mdp_calc_phase_step(src, pipe->dst.h,
			&pipe->scaler.phase_step_y[0]);

	if ((rc == -EOVERFLOW) && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)) {
		/* overflow on Qseed2 scaler is acceptable */
		rc = 0;
	} else if (rc == -EOVERFLOW) {
		/* overflow expected and should fallback to GPU */
		rc = -ECANCELED;
	} else if (rc) {
		pr_err("Vertical scaling calculation failed=%d! %d->%d\n",
				rc, src, pipe->dst.h);
	}

	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
		mdss_mdp_pipe_calc_qseed3_cfg(pipe);
	else
		mdss_mdp_pipe_calc_pixel_extn(pipe);

	return rc;
}
578
579inline void mdss_mdp_overlay_set_chroma_sample(
580 struct mdss_mdp_pipe *pipe)
581{
582 pipe->chroma_sample_v = pipe->chroma_sample_h = 0;
583
584 switch (pipe->src_fmt->chroma_sample) {
585 case MDSS_MDP_CHROMA_H1V2:
586 pipe->chroma_sample_v = 1;
587 break;
588 case MDSS_MDP_CHROMA_H2V1:
589 pipe->chroma_sample_h = 1;
590 break;
591 case MDSS_MDP_CHROMA_420:
592 pipe->chroma_sample_v = 1;
593 pipe->chroma_sample_h = 1;
594 break;
595 }
596 if (pipe->horz_deci)
597 pipe->chroma_sample_h = 0;
598 if (pipe->vert_deci)
599 pipe->chroma_sample_v = 0;
600}
601
/*
 * mdss_mdp_overlay_pipe_setup() - allocate or reuse a pipe and program it
 * for an overlay request.
 * @mfd: target framebuffer device
 * @req: overlay request; req->id, dst_rect.x, flags, z_order-related
 *       fields may be rewritten; on success req->id holds the pipe ndx
 * @ppipe: out parameter; receives the configured pipe
 * @left_blend_pipe: when non-NULL, this request is the right half of a
 *       split blend and must use a higher-priority pipe than it
 * @is_single_layer: hint passed to performance tuning
 *
 * On any failure after a pipe is mapped, the pipe is destroyed if it has
 * never been played and every other pipe on this fb is invalidated
 * (marked dirty, SMP unreserved) so the next validation starts clean.
 */
int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
	struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe,
	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer)
{
	struct mdss_mdp_format_params *fmt;
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_mixer *mixer = NULL;
	u32 pipe_type, mixer_mux;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	int ret;
	u32 bwc_enabled;
	u32 rot90;
	bool is_vig_needed = false;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	u32 flags = 0;

	if (mdp5_data->ctl == NULL)
		return -ENODEV;

	if (req->flags & MDP_ROT_90) {
		pr_err("unsupported inline rotation\n");
		return -EOPNOTSUPP;
	}

	if ((req->dst_rect.w > mdata->max_mixer_width) ||
		(req->dst_rect.h > MAX_DST_H)) {
		pr_err("exceeded max mixer supported resolution %dx%d\n",
				req->dst_rect.w, req->dst_rect.h);
		return -EOVERFLOW;
	}

	/* decide which layer mixer the overlay lands on */
	if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w))
		mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
	else
		mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;

	pr_debug("ctl=%u req id=%x mux=%d z_order=%d flags=0x%x dst_x:%d\n",
		mdp5_data->ctl->num, req->id, mixer_mux, req->z_order,
		req->flags, req->dst_rect.x);

	fmt = mdss_mdp_get_format_params(req->src.format);
	if (!fmt) {
		pr_err("invalid pipe format %d\n", req->src.format);
		return -EINVAL;
	}

	bwc_enabled = req->flags & MDP_BWC_EN;
	rot90 = req->flags & MDP_SOURCE_ROTATED_90;

	/*
	 * Always set yuv rotator output to pseudo planar.
	 */
	if (bwc_enabled || rot90) {
		req->src.format =
			mdss_mdp_get_rotator_dst_format(req->src.format, rot90,
				bwc_enabled);
		fmt = mdss_mdp_get_format_params(req->src.format);
		if (!fmt) {
			pr_err("invalid pipe format %d\n", req->src.format);
			return -EINVAL;
		}
	}

	ret = mdss_mdp_ov_xres_check(mfd, req);
	if (ret)
		return ret;

	ret = mdss_mdp_overlay_req_check(mfd, req, fmt);
	if (ret)
		return ret;

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
	if (!mixer) {
		pr_err("unable to get mixer\n");
		return -ENODEV;
	}

	/* targets whose RGB pipes can't scale must use a VIG pipe to scale */
	if ((mdata->has_non_scalar_rgb) &&
		((req->src_rect.w != req->dst_rect.w) ||
			(req->src_rect.h != req->dst_rect.h)))
		is_vig_needed = true;

	if (req->id == MSMFB_NEW_REQUEST) {
		/* new overlay: pick a pipe type and allocate a pipe */
		switch (req->pipe_type) {
		case PIPE_TYPE_VIG:
			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
			break;
		case PIPE_TYPE_RGB:
			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
			break;
		case PIPE_TYPE_DMA:
			pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
			break;
		case PIPE_TYPE_CURSOR:
			pipe_type = MDSS_MDP_PIPE_TYPE_CURSOR;
			break;
		case PIPE_TYPE_AUTO:
		default:
			if (req->flags & MDP_OV_PIPE_FORCE_DMA)
				pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
			else if (fmt->is_yuv ||
				(req->flags & MDP_OV_PIPE_SHARE) ||
				is_vig_needed)
				pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
			else
				pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
			break;
		}

		pipe = mdss_mdp_pipe_alloc(mixer, pipe_type, left_blend_pipe);

		/* RGB pipes can be used instead of DMA */
		if (IS_ERR_OR_NULL(pipe) &&
			(req->pipe_type == PIPE_TYPE_AUTO) &&
			(pipe_type == MDSS_MDP_PIPE_TYPE_DMA)) {
			pr_debug("giving RGB pipe for fb%d. flags:0x%x\n",
				mfd->index, req->flags);
			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
				left_blend_pipe);
		}

		/* VIG pipes can also support RGB format */
		if (IS_ERR_OR_NULL(pipe) &&
			(req->pipe_type == PIPE_TYPE_AUTO) &&
			(pipe_type == MDSS_MDP_PIPE_TYPE_RGB)) {
			pr_debug("giving ViG pipe for fb%d. flags:0x%x\n",
				mfd->index, req->flags);
			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
				left_blend_pipe);
		}

		if (IS_ERR(pipe)) {
			return PTR_ERR(pipe);
		} else if (!pipe) {
			pr_err("error allocating pipe. flags=0x%x req->pipe_type=%d pipe_type=%d\n",
				req->flags, req->pipe_type, pipe_type);
			return -ENODEV;
		}

		ret = mdss_mdp_pipe_map(pipe);
		if (ret) {
			pr_err("unable to map pipe=%d\n", pipe->num);
			return ret;
		}

		mutex_lock(&mdp5_data->list_lock);
		list_add(&pipe->list, &mdp5_data->pipes_used);
		mutex_unlock(&mdp5_data->list_lock);
		pipe->mixer_left = mixer;
		pipe->mfd = mfd;
		pipe->play_cnt = 0;
	} else {
		/* existing overlay: look up and re-validate the pipe */
		pipe = __overlay_find_pipe(mfd, req->id);
		if (!pipe) {
			pr_err("invalid pipe ndx=%x\n", req->id);
			return -ENODEV;
		}

		ret = mdss_mdp_pipe_map(pipe);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_err("Unable to map used pipe%d ndx=%x\n",
					pipe->num, pipe->ndx);
			return ret;
		}

		if (is_vig_needed && (pipe->type != MDSS_MDP_PIPE_TYPE_VIG)) {
			pr_err("pipe is non-scalar ndx=%x\n", req->id);
			ret = -EINVAL;
			goto exit_fail;
		}

		/* overlay moved across mixers: unstage from the old one */
		if ((pipe->mixer_left != mixer) &&
				(pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR)) {
			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
				pr_err("Can't switch mixer %d->%d pnum %d!\n",
					pipe->mixer_left->num, mixer->num,
						pipe->num);
				ret = -EINVAL;
				goto exit_fail;
			}
			pr_debug("switching pipe%d mixer %d->%d stage%d\n",
				pipe->num,
				pipe->mixer_left ? pipe->mixer_left->num : -1,
				mixer->num, req->z_order);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			pipe->mixer_left = mixer;
		}
	}

	/* enforce pipe-priority ordering for split (left/right) blends */
	if (left_blend_pipe) {
		if (pipe->priority <= left_blend_pipe->priority) {
			pr_err("priority limitation. left:%d right%d\n",
				left_blend_pipe->priority, pipe->priority);
			ret = -EBADSLT;
			goto exit_fail;
		} else {
			pr_debug("pipe%d is a right_pipe\n", pipe->num);
			pipe->is_right_blend = true;
		}
	} else if (pipe->is_right_blend) {
		/*
		 * pipe used to be right blend need to update mixer
		 * configuration to remove it as a right blend
		 */
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
		pipe->is_right_blend = false;
	}

	if (mfd->panel_orientation)
		req->flags ^= mfd->panel_orientation;

	req->priority = pipe->priority;
	/* unchanged request on a clean pipe: nothing to reprogram */
	if (!pipe->dirty && !memcmp(req, &pipe->req_data, sizeof(*req))) {
		pr_debug("skipping pipe_reconfiguration\n");
		goto skip_reconfigure;
	}

	pipe->flags = req->flags;
	if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
		pr_err("BWC is not supported in MDP version %x\n",
			mdp5_data->mdata->mdp_rev);
		pipe->bwc_mode = 0;
	} else {
		/* BWC is disabled when the mixer is in rotator mode */
		pipe->bwc_mode = pipe->mixer_left->rotator_mode ?
			0 : (bwc_enabled ? 1 : 0);
	}
	pipe->img_width = req->src.width & 0x3fff;
	pipe->img_height = req->src.height & 0x3fff;
	pipe->src.x = req->src_rect.x;
	pipe->src.y = req->src_rect.y;
	pipe->src.w = req->src_rect.w;
	pipe->src.h = req->src_rect.h;
	pipe->dst.x = req->dst_rect.x;
	pipe->dst.y = req->dst_rect.y;
	pipe->dst.w = req->dst_rect.w;
	pipe->dst.h = req->dst_rect.h;

	if (mixer->ctl) {
		pipe->dst.x += mixer->ctl->border_x_off;
		pipe->dst.y += mixer->ctl->border_y_off;
	}

	/* mirror destination for panels mounted flipped */
	if (mfd->panel_orientation & MDP_FLIP_LR)
		pipe->dst.x = pipe->mixer_left->width
			- pipe->dst.x - pipe->dst.w;
	if (mfd->panel_orientation & MDP_FLIP_UD)
		pipe->dst.y = pipe->mixer_left->height
			- pipe->dst.y - pipe->dst.h;

	pipe->horz_deci = req->horz_deci;
	pipe->vert_deci = req->vert_deci;

	/*
	 * check if overlay span across two mixers and if source split is
	 * available. If yes, enable src_split_req flag so that during mixer
	 * staging, same pipe will be stagged on both layer mixers.
	 */
	if (mdata->has_src_split) {
		if ((pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR) &&
				is_split_lm(mfd)) {
			pipe->src_split_req = true;
		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
			((req->dst_rect.x + req->dst_rect.w) > mixer->width)) {
			if (req->dst_rect.x >= mixer->width) {
				pr_err("%pS: err dst_x can't lie in right half",
					__builtin_return_address(0));
				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
					req->flags, req->dst_rect.x,
					req->dst_rect.w, mixer->width);
				ret = -EINVAL;
				goto exit_fail;
			} else {
				pipe->src_split_req = true;
			}
		} else {
			/* no longer spanning: drop any stale right staging */
			if (pipe->src_split_req) {
				mdss_mdp_mixer_pipe_unstage(pipe,
					pipe->mixer_right);
				pipe->mixer_right = NULL;
			}
			pipe->src_split_req = false;
		}
	}

	memcpy(&pipe->scaler, &req->scale, sizeof(struct mdp_scale_data));
	pipe->src_fmt = fmt;
	mdss_mdp_overlay_set_chroma_sample(pipe);

	pipe->mixer_stage = req->z_order;
	pipe->is_fg = req->is_fg;
	pipe->alpha = req->alpha;
	pipe->transp = req->transp_mask;
	pipe->blend_op = req->blend_op;
	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
		pipe->blend_op = fmt->alpha_enable ?
					BLEND_OP_PREMULTIPLIED :
					BLEND_OP_OPAQUE;

	if (!fmt->alpha_enable && (pipe->blend_op != BLEND_OP_OPAQUE))
		pr_debug("Unintended blend_op %d on layer with no alpha plane\n",
			pipe->blend_op);

	/* unrotated, unscaled YUV: disable overfetch on bottom/right edges */
	if (fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
			!pipe->scaler.enable) {
		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;

		if (!(pipe->flags & MDSS_MDP_DUAL_PIPE) ||
			IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w))
			pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
	} else {
		pipe->overfetch_disable = 0;
	}
	pipe->bg_color = req->bg_color;

	/* cursor pipes skip pp, perf, scaling and SMP handling */
	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
		goto cursor_done;

	mdss_mdp_pipe_pp_clear(pipe);
	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
		memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
					sizeof(struct mdp_overlay_pp_params));
		ret = mdss_mdp_pp_sspp_config(pipe);
		if (ret) {
			pr_err("failed to configure pp params ret %d\n", ret);
			goto exit_fail;
		}
	}

	/*
	 * Populate Color Space.
	 */
	if (pipe->src_fmt->is_yuv && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG))
		pipe->csc_coeff_set = req->color_space;
	/*
	 * When scaling is enabled src crop and image
	 * width and height is modified by user
	 */
	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
			pipe->src.x &= ~1;
			pipe->src.w /= 2;
			pipe->img_width /= 2;
		} else {
			pipe->src.h /= 2;
			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
			pipe->src.y &= ~1;
		}
	}

	if (is_single_layer)
		flags |= PERF_CALC_PIPE_SINGLE_LAYER;

	ret = mdp_pipe_tune_perf(pipe, flags);
	if (ret) {
		pr_debug("unable to satisfy performance. ret=%d\n", ret);
		goto exit_fail;
	}

	ret = mdss_mdp_overlay_setup_scaling(pipe);
	if (ret)
		goto exit_fail;

	if ((mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
		(mdp5_data->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
		mdss_mdp_smp_release(pipe);

	ret = mdss_mdp_smp_reserve(pipe);
	if (ret) {
		pr_debug("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
			pipe->num, ret);
		goto exit_fail;
	}


	req->id = pipe->ndx;

cursor_done:
	req->vert_deci = pipe->vert_deci;

	pipe->req_data = *req;
	pipe->dirty = false;

	pipe->params_changed++;
skip_reconfigure:
	*ppipe = pipe;

	mdss_mdp_pipe_unmap(pipe);

	return ret;
exit_fail:
	mdss_mdp_pipe_unmap(pipe);

	mutex_lock(&mdp5_data->list_lock);
	/* a pipe that never played can be destroyed outright */
	if (pipe->play_cnt == 0) {
		pr_debug("failed for pipe %d\n", pipe->num);
		if (!list_empty(&pipe->list))
			list_del_init(&pipe->list);
		mdss_mdp_pipe_destroy(pipe);
	}

	/* invalidate any overlays in this framebuffer after failure */
	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
		pr_debug("freeing allocations for pipe %d\n", pipe->num);
		mdss_mdp_smp_unreserve(pipe);
		pipe->params_changed = 0;
		pipe->dirty = true;
	}
	mutex_unlock(&mdp5_data->list_lock);
	return ret;
}
1018
1019static int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
1020 struct mdp_overlay *req)
1021{
1022 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1023 int ret;
1024
1025 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
1026 if (ret)
1027 return ret;
1028
1029 if (mdss_fb_is_power_off(mfd)) {
1030 mutex_unlock(&mdp5_data->ov_lock);
1031 return -EPERM;
1032 }
1033
1034 if (req->src.format == MDP_RGB_BORDERFILL) {
1035 req->id = BORDERFILL_NDX;
1036 } else {
1037 struct mdss_mdp_pipe *pipe;
1038
1039 /* userspace zorder start with stage 0 */
1040 req->z_order += MDSS_MDP_STAGE_0;
1041
1042 ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
1043
1044 req->z_order -= MDSS_MDP_STAGE_0;
1045 }
1046
1047 mutex_unlock(&mdp5_data->ov_lock);
1048
1049 return ret;
1050}
1051
1052/*
1053 * it's caller responsibility to acquire mdp5_data->list_lock while calling
1054 * this function
1055 */
1056struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
1057 struct mdss_mdp_pipe *pipe)
1058{
1059 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1060 struct mdss_mdp_data *buf;
1061 int i;
1062
1063 if (list_empty(&mdp5_data->bufs_pool)) {
1064 pr_debug("allocating %u bufs for fb%d\n",
1065 BUF_POOL_SIZE, mfd->index);
1066
1067 buf = kcalloc(BUF_POOL_SIZE, sizeof(*buf), GFP_KERNEL);
1068 if (!buf)
1069 return NULL;
1070
1071 list_add(&buf->chunk_list, &mdp5_data->bufs_chunks);
1072 kmemleak_not_leak(buf);
1073
1074 for (i = 0; i < BUF_POOL_SIZE; i++) {
1075 buf->state = MDP_BUF_STATE_UNUSED;
1076 list_add(&buf[i].buf_list, &mdp5_data->bufs_pool);
1077 }
1078 }
1079
1080 buf = list_first_entry(&mdp5_data->bufs_pool,
1081 struct mdss_mdp_data, buf_list);
1082 WARN_ON(buf->state != MDP_BUF_STATE_UNUSED);
1083 buf->state = MDP_BUF_STATE_READY;
1084 buf->last_alloc = local_clock();
1085 buf->last_pipe = pipe;
1086
1087 list_move_tail(&buf->buf_list, &mdp5_data->bufs_used);
1088 list_add_tail(&buf->pipe_list, &pipe->buf_queue);
1089
1090 pr_debug("buffer alloc: %pK\n", buf);
1091
1092 return buf;
1093}
1094
1095static
1096struct mdss_mdp_data *__mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
1097 struct mdss_mdp_pipe *pipe)
1098{
1099 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1100 struct mdss_mdp_data *buf;
1101
1102 mutex_lock(&mdp5_data->list_lock);
1103 buf = mdss_mdp_overlay_buf_alloc(mfd, pipe);
1104 mutex_unlock(&mdp5_data->list_lock);
1105
1106 return buf;
1107}
1108
1109static void mdss_mdp_overlay_buf_deinit(struct msm_fb_data_type *mfd)
1110{
1111 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1112 struct mdss_mdp_data *buf, *t;
1113
1114 pr_debug("performing cleanup of buffers pool on fb%d\n", mfd->index);
1115
1116 WARN_ON(!list_empty(&mdp5_data->bufs_used));
1117
1118 list_for_each_entry_safe(buf, t, &mdp5_data->bufs_pool, buf_list)
1119 list_del(&buf->buf_list);
1120
1121 list_for_each_entry_safe(buf, t, &mdp5_data->bufs_chunks, chunk_list) {
1122 list_del(&buf->chunk_list);
1123 kfree(buf);
1124 }
1125}
1126
/*
 * mdss_mdp_overlay_buf_free() - return a buffer to the per-fb pool
 *
 * Unlinks the buffer from its pipe's queue (if queued), releases the
 * underlying dma buffer, marks the tracking structure unused and moves
 * it back to the free pool.
 *
 * It's the caller's responsibility to acquire mdp5_data->list_lock while
 * calling this function.
 */
void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
		struct mdss_mdp_data *buf)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);

	if (!list_empty(&buf->pipe_list))
		list_del_init(&buf->pipe_list);

	mdss_mdp_data_free(buf, false, DMA_TO_DEVICE);

	/* record free time for debug, then mark reusable */
	buf->last_freed = local_clock();
	buf->state = MDP_BUF_STATE_UNUSED;

	pr_debug("buffer freed: %pK\n", buf);

	list_move_tail(&buf->buf_list, &mdp5_data->bufs_pool);
}
1148
1149static void __mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
1150 struct mdss_mdp_data *buf)
1151{
1152 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1153
1154 mutex_lock(&mdp5_data->list_lock);
1155 mdss_mdp_overlay_buf_free(mfd, buf);
1156 mutex_unlock(&mdp5_data->list_lock);
1157}
1158
/* Mark a queued buffer for deferred cleanup and detach it from its pipe. */
static inline void __pipe_buf_mark_cleanup(struct msm_fb_data_type *mfd,
		struct mdss_mdp_data *buf)
{
	/* buffer still in bufs_used, marking it as cleanup will clean it up */
	buf->state = MDP_BUF_STATE_CLEANUP;
	/* drop it from the pipe's buf_queue; buf_list membership unchanged */
	list_del_init(&buf->pipe_list);
}
1166
1167/**
1168 * __mdss_mdp_overlay_free_list_purge() - clear free list of buffers
1169 * @mfd: Msm frame buffer data structure for the associated fb
1170 *
1171 * Frees memory and clears current list of buffers which are pending free
1172 */
1173static void __mdss_mdp_overlay_free_list_purge(struct msm_fb_data_type *mfd)
1174{
1175 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1176 struct mdss_mdp_data *buf, *t;
1177
1178 pr_debug("purging fb%d free list\n", mfd->index);
1179
1180 list_for_each_entry_safe(buf, t, &mdp5_data->bufs_freelist, buf_list)
1181 mdss_mdp_overlay_buf_free(mfd, buf);
1182}
1183
/*
 * Move all buffers queued on @pipe to the fb's free list (releasing them
 * immediately for secure UI sessions) and then destroy the pipe itself.
 * Caller holds mdp5_data->list_lock.
 */
static void __overlay_pipe_cleanup(struct msm_fb_data_type *mfd,
		struct mdss_mdp_pipe *pipe)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_data *buf, *tmpbuf;

	list_for_each_entry_safe(buf, tmpbuf, &pipe->buf_queue, pipe_list) {
		__pipe_buf_mark_cleanup(mfd, buf);
		list_move(&buf->buf_list, &mdp5_data->bufs_freelist);

		/*
		 * in case of secure UI, the buffer needs to be released as
		 * soon as session is closed.
		 */
		if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
			mdss_mdp_overlay_buf_free(mfd, buf);
	}

	mdss_mdp_pipe_destroy(pipe);
}
1204
1205/**
1206 * mdss_mdp_overlay_cleanup() - handles cleanup after frame commit
1207 * @mfd: Msm frame buffer data structure for the associated fb
1208 * @destroy_pipes: list of pipes that should be destroyed as part of cleanup
1209 *
1210 * Goes through destroy_pipes list and ensures they are ready to be destroyed
1211 * and cleaned up. Also cleanup of any pipe buffers after flip.
1212 */
static void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd,
		struct list_head *destroy_pipes)
{
	struct mdss_mdp_pipe *pipe, *tmp;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	bool recovery_mode = false;
	bool skip_fetch_halt, pair_found;
	struct mdss_mdp_data *buf, *tmpbuf;

	mutex_lock(&mdp5_data->list_lock);
	/* first pass: halt fetch on each pipe (or enter recovery mode) */
	list_for_each_entry(pipe, destroy_pipes, list) {
		pair_found = false;
		skip_fetch_halt = false;
		tmp = pipe;

		/*
		 * Find if second rect is in the destroy list from the current
		 * position. So if both rects are part of the destroy list then
		 * fetch halt will be skipped for the 1st rect.
		 */
		list_for_each_entry_from(tmp, destroy_pipes, list) {
			if (tmp->num == pipe->num) {
				pair_found = true;
				break;
			}
		}

		/* skip fetch halt if pipe's other rect is still in use */
		if (!pair_found) {
			tmp = (struct mdss_mdp_pipe *)pipe->multirect.next;
			if (tmp)
				skip_fetch_halt =
					atomic_read(&tmp->kref.refcount);
		}

		/* make sure pipe fetch has been halted before freeing buffer */
		if (!skip_fetch_halt && mdss_mdp_pipe_fetch_halt(pipe, false)) {
			/*
			 * if pipe is not able to halt. Enter recovery mode,
			 * by un-staging any pipes that are attached to mixer
			 * so that any freed pipes that are not able to halt
			 * can be staged in solid fill mode and be reset
			 * with next vsync
			 */
			if (!recovery_mode) {
				recovery_mode = true;
				mdss_mdp_mixer_unstage_all(ctl->mixer_left);
				mdss_mdp_mixer_unstage_all(ctl->mixer_right);
			}
			pipe->params_changed++;
			pipe->unhalted = true;
			mdss_mdp_pipe_queue_data(pipe, NULL);
		}
	}

	if (recovery_mode) {
		pr_warn("performing recovery sequence for fb%d\n", mfd->index);
		__overlay_kickoff_requeue(mfd);
	}

	/* release buffers deferred to the free list on earlier commits */
	__mdss_mdp_overlay_free_list_purge(mfd);

	/* defer cleanup-marked in-use buffers to the free list */
	list_for_each_entry_safe(buf, tmpbuf, &mdp5_data->bufs_used, buf_list) {
		if (buf->state == MDP_BUF_STATE_CLEANUP)
			list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
	}

	/* second pass: actually tear down each pipe on the destroy list */
	list_for_each_entry_safe(pipe, tmp, destroy_pipes, list) {
		list_del_init(&pipe->list);
		if (recovery_mode) {
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
			pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
		}
		__overlay_pipe_cleanup(mfd, pipe);

		if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
			/*
			 * track only RECT0, since at any given point there
			 * can only be RECT0 only or RECT0 + RECT1
			 */
			ctl->mixer_left->next_pipe_map &= ~pipe->ndx;
			if (ctl->mixer_right)
				ctl->mixer_right->next_pipe_map &= ~pipe->ndx;
		}
	}
	mutex_unlock(&mdp5_data->list_lock);
}
1302
/*
 * mdss_mdp_handoff_cleanup_pipes() - release pipes handed off by bootloader
 * @mfd: Msm frame buffer data structure for the associated fb
 * @type: pipe type to scan (VIG, RGB or DMA); other types are ignored
 *
 * Walks all pipes of the given type and, for any still marked as handed
 * off, unstages it from its left mixer and moves it to the cleanup list.
 */
void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
	u32 type)
{
	u32 i, npipes;
	struct mdss_mdp_pipe *pipe;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);

	switch (type) {
	case MDSS_MDP_PIPE_TYPE_VIG:
		pipe = mdata->vig_pipes;
		npipes = mdata->nvig_pipes;
		break;
	case MDSS_MDP_PIPE_TYPE_RGB:
		pipe = mdata->rgb_pipes;
		npipes = mdata->nrgb_pipes;
		break;
	case MDSS_MDP_PIPE_TYPE_DMA:
		pipe = mdata->dma_pipes;
		npipes = mdata->ndma_pipes;
		break;
	default:
		return;
	}

	for (i = 0; i < npipes; i++) {
		/* only check for first rect and ignore additional */
		if (pipe->is_handed_off) {
			pr_debug("Unmapping handed off pipe %d\n", pipe->num);
			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			pipe->is_handed_off = false;
		}
		/* step over all rect entries of this physical pipe */
		pipe += pipe->multirect.max_rects;
	}
}
1339
1340/**
1341 * mdss_mdp_overlay_start() - Programs the MDP control data path to hardware
1342 * @mfd: Msm frame buffer structure associated with fb device.
1343 *
1344 * Program the MDP hardware with the control settings for the framebuffer
1345 * device. In addition to this, this function also handles the transition
1346 * from the the splash screen to the android boot animation when the
1347 * continuous splash screen feature is enabled.
1348 */
int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
{
	int rc;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);

	if (mdss_mdp_ctl_is_power_on(ctl)) {
		/* already running: enable batfet if not held, drop splash */
		if (!mdp5_data->mdata->batfet)
			mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
		mdss_mdp_release_splash_pipe(mfd);
		return 0;
	} else if (mfd->panel_info->cont_splash_enabled) {
		if (mdp5_data->allow_kickoff) {
			mdp5_data->allow_kickoff = false;
		} else {
			/* reject commits without pipes while splash is up */
			mutex_lock(&mdp5_data->list_lock);
			rc = list_empty(&mdp5_data->pipes_used);
			mutex_unlock(&mdp5_data->list_lock);
			if (rc) {
				pr_debug("empty kickoff on fb%d during cont splash\n",
					mfd->index);
				return -EPERM;
			}
		}
	} else if (mdata->handoff_pending) {
		pr_warn("fb%d: commit while splash handoff pending\n",
				mfd->index);
		return -EPERM;
	}

	pr_debug("starting fb%d overlay\n", mfd->index);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	/*
	 * If idle pc feature is not enabled, then get a reference to the
	 * runtime device which will be released when overlay is turned off
	 */
	if (!mdp5_data->mdata->idle_pc_enabled ||
		(mfd->panel_info->type != MIPI_CMD_PANEL)) {
		rc = pm_runtime_get_sync(&mfd->pdev->dev);
		if (IS_ERR_VALUE((unsigned long)rc)) {
			pr_err("unable to resume with pm_runtime_get_sync rc=%d\n",
				rc);
			goto end;
		}
	}

	/*
	 * We need to do hw init before any hw programming.
	 * Also, hw init involves programming the VBIF registers which
	 * should be done only after attaching IOMMU which in turn would call
	 * in to TZ to restore security configs on the VBIF registers.
	 * This is not needed when continuous splash screen is enabled since
	 * we would have called in to TZ to restore security configs from LK.
	 */
	if (!mfd->panel_info->cont_splash_enabled) {
		rc = mdss_iommu_ctrl(1);
		if (IS_ERR_VALUE((unsigned long)rc)) {
			pr_err("iommu attach failed rc=%d\n", rc);
			goto end;
		}
		mdss_hw_init(mdss_res);
		mdss_iommu_ctrl(0);
	}

	/*
	 * Increment the overlay active count prior to calling ctl_start.
	 * This is needed to ensure that if idle power collapse kicks in
	 * right away, it would be handled correctly.
	 */
	atomic_inc(&mdp5_data->mdata->active_intf_cnt);
	rc = mdss_mdp_ctl_start(ctl, false);
	if (rc == 0) {
		mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
				&mfd->mdp_sync_pt_data.notifier);
	} else {
		pr_err("mdp ctl start failed.\n");
		goto ctl_error;
	}

	/* Restore any previously configured PP features by resetting the dirty
	 * bits for enabled features. The dirty bits will be consumed during the
	 * first display commit when the PP hardware blocks are updated
	 */
	rc = mdss_mdp_pp_resume(mfd);
	if (rc && (rc != -EPERM) && (rc != -ENODEV))
		pr_err("PP resume err %d\n", rc);

	rc = mdss_mdp_splash_cleanup(mfd, true);
	if (!rc)
		goto end;

ctl_error:
	/* undo ctl start: destroy ctl and drop the active interface count */
	mdss_mdp_ctl_destroy(ctl);
	atomic_dec(&mdp5_data->mdata->active_intf_cnt);
	mdp5_data->ctl = NULL;
end:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	return rc;
}
1451
1452static void mdss_mdp_overlay_update_pm(struct mdss_overlay_private *mdp5_data)
1453{
1454 ktime_t wakeup_time;
1455
1456 if (!mdp5_data->cpu_pm_hdl)
1457 return;
1458
1459 if (mdss_mdp_display_wakeup_time(mdp5_data->ctl, &wakeup_time))
1460 return;
1461
1462 activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
1463}
1464
1465static void __unstage_pipe_and_clean_buf(struct msm_fb_data_type *mfd,
1466 struct mdss_mdp_pipe *pipe, struct mdss_mdp_data *buf)
1467{
1468
1469 pr_debug("unstaging pipe:%d rect:%d buf:%d\n",
1470 pipe->num, pipe->multirect.num, !buf);
1471 MDSS_XLOG(pipe->num, pipe->multirect.num, !buf);
1472 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
1473 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
1474 pipe->dirty = true;
1475
1476 if (buf)
1477 __pipe_buf_mark_cleanup(mfd, buf);
1478}
1479
/*
 * __overlay_queue_pipes() - queue pending buffers on all in-use pipes
 * @mfd: Msm frame buffer data structure for the associated fb
 *
 * For each pipe on pipes_used: select the next buffer from its queue,
 * map it and hand it to the pipe. Handles DMA pipe reprogramming for
 * shared-WFD mixer switching, skips non-secure pipes during a secure
 * display session, and unstages both rects of a multi-rect pipe when
 * queueing fails. Per-pipe queue errors are absorbed by unstaging; the
 * function only returns an error when the DMA pipe reset itself fails.
 */
static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_mdp_ctl *tmp;
	int ret = 0;

	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
		struct mdss_mdp_data *buf;

		if (pipe->dirty) {
			pr_err("fb%d: pipe %d dirty! skipping configuration\n",
					mfd->index, pipe->num);
			continue;
		}

		/*
		 * When secure display is enabled, if there is a non secure
		 * display pipe, skip that
		 */
		if (mdss_get_sd_client_cnt() &&
			!(pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
			pr_warn("Non secure pipe during secure display: %u: %08X, skip\n",
					pipe->num, pipe->flags);
			continue;
		}
		/*
		 * When external is connected and no dedicated wfd is present,
		 * reprogram DMA pipe before kickoff to clear out any previous
		 * block mode configuration.
		 */
		if ((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
			(ctl->shared_lock &&
			(ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))) {
			if (ctl->mdata->mixer_switched) {
				ret = mdss_mdp_overlay_pipe_setup(mfd,
					&pipe->req_data, &pipe, NULL, false);
				pr_debug("resetting DMA pipe for ctl=%d",
					ctl->num);
			}
			if (ret) {
				pr_err("can't reset DMA pipe ret=%d ctl=%d\n",
					ret, ctl->num);
				return ret;
			}

			tmp = mdss_mdp_ctl_mixer_switch(ctl,
					MDSS_MDP_WB_CTL_TYPE_LINE);
			if (!tmp)
				return -EINVAL;
			pipe->mixer_left = mdss_mdp_mixer_get(tmp,
					MDSS_MDP_MIXER_MUX_DEFAULT);
		}

		/* pick the head of the pipe's buffer queue, if any */
		buf = list_first_entry_or_null(&pipe->buf_queue,
				struct mdss_mdp_data, pipe_list);
		if (buf) {
			switch (buf->state) {
			case MDP_BUF_STATE_READY:
				pr_debug("pnum=%d buf=%pK first buffer ready\n",
						pipe->num, buf);
				break;
			case MDP_BUF_STATE_ACTIVE:
				if (list_is_last(&buf->pipe_list,
						&pipe->buf_queue)) {
					pr_debug("pnum=%d no buf update\n",
							pipe->num);
				} else {
					struct mdss_mdp_data *tmp = buf;
					/*
					 * buffer flip, new buffer will
					 * replace currently active one,
					 * mark currently active for cleanup
					 */
					buf = list_next_entry(tmp, pipe_list);
					__pipe_buf_mark_cleanup(mfd, tmp);
				}
				break;
			default:
				pr_err("invalid state of buf %pK=%d\n",
						buf, buf->state);
				WARN_ON(1);
				break;
			}
		}

		/* ensure pipes are reconfigured after power off/on */
		if (ctl->play_cnt == 0)
			pipe->params_changed++;

		if (buf && (buf->state == MDP_BUF_STATE_READY)) {
			buf->state = MDP_BUF_STATE_ACTIVE;
			ret = mdss_mdp_data_map(buf, false, DMA_TO_DEVICE);
		} else if (!pipe->params_changed &&
			   !mdss_mdp_is_roi_changed(pipe->mfd)) {

			/*
			 * no update for the given pipe nor any change in the
			 * ROI so skip pipe programming and continue with next.
			 */
			continue;
		} else if (buf) {
			WARN_ON(buf->state != MDP_BUF_STATE_ACTIVE);
			pr_debug("requeueing active buffer on pnum=%d\n",
					pipe->num);
		} else if ((pipe->flags & MDP_SOLID_FILL) == 0) {
			pr_warn("commit without buffer on pipe %d\n",
				pipe->num);
			ret = -EINVAL;
		}
		/*
		 * if we reach here without errors and buf == NULL
		 * then solid fill will be set
		 */
		if (!IS_ERR_VALUE((unsigned long)ret))
			ret = mdss_mdp_pipe_queue_data(pipe, buf);

		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_warn("Unable to queue data for pnum=%d rect=%d\n",
					pipe->num, pipe->multirect.num);

			/*
			 * If we fail for a multi-rect pipe, unstage both rects
			 * so we don't leave the pipe configured in multi-rect
			 * mode with only one rectangle staged.
			 */
			if (pipe->multirect.mode !=
					MDSS_MDP_PIPE_MULTIRECT_NONE) {
				struct mdss_mdp_pipe *next_pipe =
					(struct mdss_mdp_pipe *)
					pipe->multirect.next;

				if (next_pipe) {
					struct mdss_mdp_data *next_buf =
						list_first_entry_or_null(
							&next_pipe->buf_queue,
							struct mdss_mdp_data,
							pipe_list);

					__unstage_pipe_and_clean_buf(mfd,
						next_pipe, next_buf);
				} else {
					pr_warn("cannot find rect pnum=%d\n",
						pipe->num);
				}
			}

			__unstage_pipe_and_clean_buf(mfd, pipe, buf);
		}
	}

	return 0;
}
1634
/*
 * __overlay_kickoff_requeue() - recovery commit sequence
 *
 * Commits the current state and waits for completion, then unstages all
 * pipes from both mixers, re-queues the pipes still in use and commits
 * once more so the mixer configuration is rebuilt cleanly.
 */
static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	mdss_mdp_display_commit(ctl, NULL, NULL);
	mdss_mdp_display_wait4comp(ctl);

	/* unstage any recovery pipes and re-queue used pipes */
	mdss_mdp_mixer_unstage_all(ctl->mixer_left);
	mdss_mdp_mixer_unstage_all(ctl->mixer_right);

	__overlay_queue_pipes(mfd);

	mdss_mdp_display_commit(ctl, NULL, NULL);
	mdss_mdp_display_wait4comp(ctl);
}
1651
1652static int mdss_mdp_commit_cb(enum mdp_commit_stage_type commit_stage,
1653 void *data)
1654{
1655 int ret = 0;
1656 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
1657 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
1658 struct mdss_mdp_ctl *ctl;
1659
1660 switch (commit_stage) {
1661 case MDP_COMMIT_STAGE_SETUP_DONE:
1662 ctl = mfd_to_ctl(mfd);
1663 mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);
1664 mdp5_data->kickoff_released = true;
1665 mutex_unlock(&mdp5_data->ov_lock);
1666 break;
1667 case MDP_COMMIT_STAGE_READY_FOR_KICKOFF:
1668 mutex_lock(&mdp5_data->ov_lock);
1669 break;
1670 default:
1671 pr_err("Invalid commit stage %x", commit_stage);
1672 break;
1673 }
1674
1675 return ret;
1676}
1677
1678/**
1679 * __is_roi_valid() - Check if ctl roi is valid for a given pipe.
1680 * @pipe: pipe to check against.
1681 * @l_roi: roi of the left ctl path.
1682 * @r_roi: roi of the right ctl path.
1683 *
1684 * Validate roi against pipe's destination rectangle by checking following
1685 * conditions. If any of these conditions are met then return failure,
1686 * success otherwise.
1687 *
1688 * 1. Pipe has scaling and pipe's destination is intersecting with roi.
1689 * 2. Pipe's destination and roi do not overlap, In such cases, pipe should
1690 * not be part of used list and should have been omitted by user program.
1691 */
static bool __is_roi_valid(struct mdss_mdp_pipe *pipe,
	struct mdss_rect *l_roi, struct mdss_rect *r_roi)
{
	bool ret = true;
	bool is_right_mixer = pipe->mixer_left->is_right_mixer;
	/* pick the ROI of the layer mixer this pipe is staged on */
	struct mdss_rect roi = is_right_mixer ? *r_roi : *l_roi;
	struct mdss_rect dst = pipe->dst;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 left_lm_w = left_lm_w_from_mfd(pipe->mfd);

	if (pipe->src_split_req) {
		/* pipe output spans both LMs: widen ROI for comparison */
		if (roi.w) {
			/* left_roi is valid */
			roi.w += r_roi->w;
		} else {
			/*
			 * if we come here then left_roi is zero but pipe's
			 * output is crossing LM boundary if it was Full Screen
			 * update. In such case, if right ROI's (x+w) is less
			 * than pipe's dst_x then #2 check will fail even
			 * though in full coordinate system it is valid.
			 * ex:
			 * left_lm_w = 800;
			 * pipe->dst.x = 400;
			 * pipe->dst.w = 800;
			 * r_roi.x + r_roi.w = 300;
			 * To avoid such pitfall, extend ROI for comparison.
			 */
			roi.w += left_lm_w + r_roi->w;
		}
	}

	/* right mixer ROI is LM-relative; shift dst into the same space */
	if (mdata->has_src_split && is_right_mixer)
		dst.x -= left_lm_w;

	/* condition #1 above */
	if ((pipe->scaler.enable) ||
		(pipe->src.w != dst.w) || (pipe->src.h != dst.h)) {
		struct mdss_rect res;

		mdss_mdp_intersect_rect(&res, &dst, &roi);

		/* a scaling pipe must lie entirely inside the ROI */
		if (!mdss_rect_cmp(&res, &dst)) {
			pr_err("error. pipe%d has scaling and its output is interesecting with roi.\n",
				pipe->num);
			pr_err("pipe_dst:-> %d %d %d %d roi:-> %d %d %d %d\n",
				dst.x, dst.y, dst.w, dst.h,
				roi.x, roi.y, roi.w, roi.h);
			ret = false;
			goto end;
		}
	}

	/* condition #2 above */
	if (!mdss_rect_overlap_check(&dst, &roi)) {
		pr_err("error. pipe%d's output is outside of ROI.\n",
			pipe->num);
		ret = false;
	}
end:
	return ret;
}
1754
/*
 * mdss_mode_switch() - perform the display-side dynamic mode switch
 * @mfd: Msm frame buffer data structure for the associated fb
 * @mode: SWITCH_RESOLUTION, MIPI_CMD_PANEL or MIPI_VIDEO_PANEL
 *
 * Runs the panel/ctl sequence for the requested switch and restarts the
 * ctl path at the end. Mode validation has already been done in the
 * ioctl call. Returns 0 on success or a negative error code.
 */
int mdss_mode_switch(struct msm_fb_data_type *mfd, u32 mode)
{
	struct mdss_rect l_roi, r_roi;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *sctl;
	int rc = 0;

	pr_debug("fb%d switch to mode=%x\n", mfd->index, mode);
	ATRACE_FUNC();

	/* flag the pending switch on both ctl paths of a split display */
	ctl->pending_mode_switch = mode;
	sctl = mdss_mdp_get_split_ctl(ctl);
	if (sctl)
		sctl->pending_mode_switch = mode;

	/* No need for mode validation. It has been done in ioctl call */
	if (mode == SWITCH_RESOLUTION) {
		if (ctl->ops.reconfigure) {
			/* wait for previous frame to complete before switch */
			if (ctl->ops.wait_pingpong)
				rc = ctl->ops.wait_pingpong(ctl, NULL);
			if (!rc && sctl && sctl->ops.wait_pingpong)
				rc = sctl->ops.wait_pingpong(sctl, NULL);
			if (rc) {
				pr_err("wait for pp failed before resolution switch\n");
				return rc;
			}

			/*
			 * Configure the mixer parameters before the switch as
			 * the DSC parameter calculation is based on the mixer
			 * ROI. And set it to full ROI as driver expects the
			 * first frame after the resolution switch to be a
			 * full frame update.
			 */
			if (ctl->mixer_left) {
				l_roi = (struct mdss_rect) {0, 0,
					ctl->mixer_left->width,
					ctl->mixer_left->height};
				ctl->mixer_left->roi_changed = true;
				ctl->mixer_left->valid_roi = true;
			}
			if (ctl->mixer_right) {
				r_roi = (struct mdss_rect) {0, 0,
					ctl->mixer_right->width,
					ctl->mixer_right->height};
				ctl->mixer_right->roi_changed = true;
				ctl->mixer_right->valid_roi = true;
			}
			mdss_mdp_set_roi(ctl, &l_roi, &r_roi);

			mutex_lock(&mdp5_data->ov_lock);
			ctl->ops.reconfigure(ctl, mode, 1);
			mutex_unlock(&mdp5_data->ov_lock);
		/*
		 * For Video mode panels, reconfigure is not defined.
		 * So doing an explicit ctrl stop during resolution switch
		 * to balance the ctrl start at the end of this function.
		 */
		} else {
			mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
		}
	} else if (mode == MIPI_CMD_PANEL) {
		/*
		 * Need to reset roi if there was partial update in previous
		 * Command frame
		 */
		l_roi = (struct mdss_rect){0, 0,
				ctl->mixer_left->width,
				ctl->mixer_left->height};
		if (ctl->mixer_right) {
			r_roi = (struct mdss_rect) {0, 0,
				ctl->mixer_right->width,
				ctl->mixer_right->height};
		}
		mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
		mdss_mdp_switch_roi_reset(ctl);

		/* prepare/commit pair around the panel info update */
		mdss_mdp_switch_to_cmd_mode(ctl, 1);
		mdss_mdp_update_panel_info(mfd, 1, 0);
		mdss_mdp_switch_to_cmd_mode(ctl, 0);
		mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
	} else if (mode == MIPI_VIDEO_PANEL) {
		if (ctl->ops.wait_pingpong)
			rc = ctl->ops.wait_pingpong(ctl, NULL);
		mdss_mdp_update_panel_info(mfd, 0, 0);
		mdss_mdp_switch_to_vid_mode(ctl, 1);
		mdss_mdp_ctl_stop(ctl, MDSS_PANEL_POWER_OFF);
		mdss_mdp_switch_to_vid_mode(ctl, 0);
	} else {
		pr_err("Invalid mode switch arg %d\n", mode);
		return -EINVAL;
	}

	/* restart the ctl path to balance the stop done above */
	mdss_mdp_ctl_start(ctl, true);
	ATRACE_END(__func__);

	return 0;
}
1855
/*
 * mdss_mode_switch_post() - finish a dynamic mode switch
 * @mfd: Msm frame buffer data structure for the associated fb
 * @mode: mode that was switched to
 *
 * For video mode, sends the dynamic switch DCS after at least one video
 * frame has gone out. For command mode, balances the DSI clock refcount
 * so idle power collapse works. For resolution switch, completes the
 * reconfigure. Clears pending_mode_switch on both ctl paths.
 */
int mdss_mode_switch_post(struct msm_fb_data_type *mfd, u32 mode)
{
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
	struct dsi_panel_clk_ctrl clk_ctrl;
	int rc = 0;
	u32 frame_rate = 0;

	if (mode == MIPI_VIDEO_PANEL) {
		/*
		 * Need to make sure one frame has been sent in
		 * video mode prior to issuing the mode switch
		 * DCS to panel.
		 */
		frame_rate = mdss_panel_get_framerate
			(&(ctl->panel_data->panel_info),
			FPS_RESOLUTION_HZ);
		/* clamp to a sane range, then sleep one frame period in ms */
		if (!(frame_rate >= 24 && frame_rate <= 240))
			frame_rate = 24;
		frame_rate = ((1000/frame_rate) + 1);
		msleep(frame_rate);

		pr_debug("%s, start\n", __func__);
		rc = mdss_mdp_ctl_intf_event(ctl,
			MDSS_EVENT_DSI_DYNAMIC_SWITCH,
			(void *) MIPI_VIDEO_PANEL, CTL_INTF_EVENT_FLAG_DEFAULT);
		pr_debug("%s, end\n", __func__);
	} else if (mode == MIPI_CMD_PANEL) {
		/*
		 * Needed to balance out clk refcount when going
		 * from video to command. This allows for idle
		 * power collapse to work as intended.
		 */
		clk_ctrl.state = MDSS_DSI_CLK_OFF;
		clk_ctrl.client = DSI_CLK_REQ_DSI_CLIENT;
		if (sctl)
			mdss_mdp_ctl_intf_event(sctl,
				MDSS_EVENT_PANEL_CLK_CTRL, (void *)&clk_ctrl,
				CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);

		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_CLK_CTRL,
			(void *)&clk_ctrl, CTL_INTF_EVENT_FLAG_SKIP_BROADCAST);
	} else if (mode == SWITCH_RESOLUTION) {
		if (ctl->ops.reconfigure)
			rc = ctl->ops.reconfigure(ctl, mode, 0);
	}
	ctl->pending_mode_switch = 0;
	if (sctl)
		sctl->pending_mode_switch = 0;

	return rc;
}
1908
/*
 * __validate_and_set_roi() - validate and program partial update ROIs
 * @mfd: Msm frame buffer data structure for the associated fb
 * @commit: commit data carrying the user l_roi/r_roi (may be NULL)
 *
 * Validates user-supplied ROIs against src-split constraints and every
 * pipe on the used list; falls back to full-screen ROIs whenever partial
 * update cannot be honored, then programs the result via
 * mdss_mdp_set_roi().
 */
static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
	struct mdp_display_commit *commit)
{
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_rect l_roi = {0}, r_roi = {0};
	struct mdp_rect tmp_roi = {0};
	bool skip_partial_update = true;

	if (!commit)
		goto set_roi;

	/* both ROIs zero means the user requested a full frame update */
	if (!memcmp(&commit->l_roi, &tmp_roi, sizeof(tmp_roi)) &&
	    !memcmp(&commit->r_roi, &tmp_roi, sizeof(tmp_roi)))
		goto set_roi;

	rect_copy_mdp_to_mdss(&commit->l_roi, &l_roi);
	rect_copy_mdp_to_mdss(&commit->r_roi, &r_roi);

	pr_debug("input: l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
		r_roi.x, r_roi.y, r_roi.w, r_roi.h);

	/*
	 * Configure full ROI
	 * - If partial update is disabled
	 * - If it is the first frame update after dynamic resolution switch
	 */
	if (!ctl->panel_data->panel_info.partial_update_enabled
			|| (ctl->pending_mode_switch == SWITCH_RESOLUTION))
		goto set_roi;

	skip_partial_update = false;

	if (is_split_lm(mfd) && mdp5_data->mdata->has_src_split) {
		u32 left_lm_w = left_lm_w_from_mfd(mfd);
		struct mdss_rect merged_roi = l_roi;

		/*
		 * When source split is enabled on split LM displays,
		 * user program merges left and right ROI and sends
		 * it through l_roi. Split this merged ROI into
		 * left/right ROI for validation.
		 */
		mdss_rect_split(&merged_roi, &l_roi, &r_roi, left_lm_w);

		/*
		 * When source split is enabled on split LM displays,
		 * it is a HW requirement that both LM have same width
		 * if update is on both sides. Since ROIs are
		 * generated by user-land program, validate against
		 * this requirement.
		 */
		if (l_roi.w && r_roi.w && (l_roi.w != r_roi.w)) {
			pr_err("error. ROI's do not match. violating src_split requirement\n");
			pr_err("l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
				l_roi.x, l_roi.y, l_roi.w, l_roi.h,
				r_roi.x, r_roi.y, r_roi.w, r_roi.h);
			skip_partial_update = true;
			goto set_roi;
		}
	}

	/* any single invalid pipe forces a full frame update */
	list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
		if (!__is_roi_valid(pipe, &l_roi, &r_roi)) {
			skip_partial_update = true;
			pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d\n",
				pipe->num,
				pipe->dst.x, pipe->dst.y,
				pipe->dst.w, pipe->dst.h);
			break;
		}
	}

set_roi:
	if (skip_partial_update) {
		l_roi = (struct mdss_rect){0, 0,
				ctl->mixer_left->width,
				ctl->mixer_left->height};
		if (ctl->mixer_right) {
			r_roi = (struct mdss_rect) {0, 0,
					ctl->mixer_right->width,
					ctl->mixer_right->height};
		}
	}

	pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
		(l_roi.w && l_roi.h && r_roi.w && r_roi.h) ? "left+right" :
		((l_roi.w && l_roi.h) ? "left-only" : "right-only"),
		l_roi.x, l_roi.y, l_roi.w, l_roi.h,
		r_roi.x, r_roi.y, r_roi.w, r_roi.h);

	mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
}
2004
2005static bool __is_supported_candence(int cadence)
2006{
2007 return (cadence == FRC_CADENCE_22) ||
2008 (cadence == FRC_CADENCE_23) ||
2009 (cadence == FRC_CADENCE_23223);
2010}
2011
2012/* compute how many vsyncs between these 2 timestamp */
2013static int __compute_vsync_diff(s64 cur_ts,
2014 s64 base_ts, int display_fp1000s)
2015{
2016 int vsync_diff;
2017 int round_up = 0;
Sachin Bhayareb6b5a0f2018-03-02 19:50:39 +05302018 u64 ts_diff = (cur_ts - base_ts) * display_fp1000s;
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302019
2020 do_div(ts_diff, 1000000);
2021 vsync_diff = (int)ts_diff;
2022 /*
2023 * In most case DIV_ROUND_UP_ULL is enough, but calculation might be
2024 * impacted by possible jitter when vsync_diff is close to boundaries.
2025 * E.g., we have 30fps like 12.0->13.998->15.999->18.0->19.998->21.999
2026 * and 7460.001->7462.002->7464.0->7466.001->7468.002. DIV_ROUND_UP_ULL
2027 * fails in the later case.
2028 */
2029 round_up = ((vsync_diff % 1000) >= 900) ? 1 : 0;
2030 /* round up vsync count to accommodate fractions: base & diff */
2031 vsync_diff = (vsync_diff / 1000) + round_up + 1;
2032 return vsync_diff;
2033}
2034
2035static bool __validate_frc_info(struct mdss_mdp_frc_info *frc_info)
2036{
2037 struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
2038 struct mdss_mdp_frc_data *last_frc = &frc_info->last_frc;
2039 struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
2040
2041 pr_debug("frc: cur_fcnt=%d, cur_ts=%lld, last_fcnt=%d, last_ts=%lld, base_fcnt=%d, base_ts=%lld last_v_cnt=%d, last_repeat=%d base_v_cnt=%d\n",
2042 cur_frc->frame_cnt, cur_frc->timestamp,
2043 last_frc->frame_cnt, last_frc->timestamp,
2044 base_frc->frame_cnt, base_frc->timestamp,
2045 frc_info->last_vsync_cnt, frc_info->last_repeat,
2046 frc_info->base_vsync_cnt);
2047
2048 if ((cur_frc->frame_cnt == last_frc->frame_cnt) &&
2049 (cur_frc->timestamp == last_frc->timestamp)) {
2050 /* ignore repeated frame: video w/ UI layers */
2051 pr_debug("repeated frame input\n");
2052 return false;
2053 }
2054
2055 return true;
2056}
2057
2058static void __init_cadence_calc(struct mdss_mdp_frc_cadence_calc *calc)
2059{
2060 memset(calc, 0, sizeof(struct mdss_mdp_frc_cadence_calc));
2061}
2062
2063static int __calculate_cadence_id(struct mdss_mdp_frc_info *frc_info, int cnt)
2064{
2065 struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;
2066 struct mdss_mdp_frc_data *first = &calc->samples[0];
2067 struct mdss_mdp_frc_data *last = &calc->samples[cnt-1];
Sachin Bhayareb6b5a0f2018-03-02 19:50:39 +05302068 u64 ts_diff =
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302069 (last->timestamp - first->timestamp)
2070 * frc_info->display_fp1000s;
2071 u32 fcnt_diff =
2072 last->frame_cnt - first->frame_cnt;
2073 u32 fps_ratio;
2074 u32 cadence_id = FRC_CADENCE_NONE;
2075
2076 do_div(ts_diff, fcnt_diff);
2077 fps_ratio = (u32)ts_diff;
2078
2079 if ((fps_ratio > FRC_CADENCE_23_RATIO_LOW) &&
2080 (fps_ratio < FRC_CADENCE_23_RATIO_HIGH))
2081 cadence_id = FRC_CADENCE_23;
2082 else if ((fps_ratio > FRC_CADENCE_22_RATIO_LOW) &&
2083 (fps_ratio < FRC_CADENCE_22_RATIO_HIGH))
2084 cadence_id = FRC_CADENCE_22;
2085 else if ((fps_ratio > FRC_CADENCE_23223_RATIO_LOW) &&
2086 (fps_ratio < FRC_CADENCE_23223_RATIO_HIGH))
2087 cadence_id = FRC_CADENCE_23223;
2088
2089 pr_debug("frc: first=%lld, last=%lld, cnt=%d, fps_ratio=%u, cadence_id=%d\n",
2090 first->timestamp, last->timestamp, fcnt_diff,
2091 fps_ratio, cadence_id);
2092
2093 return cadence_id;
2094}
2095
2096static void __init_seq_gen(struct mdss_mdp_frc_seq_gen *gen, int cadence_id)
2097{
2098 int cadence22[2] = {2, 2};
2099 int cadence23[2] = {2, 3};
2100 int cadence23223[5] = {2, 3, 2, 2, 3};
2101 int *cadence = NULL;
2102 int len = 0;
2103
2104 memset(gen, 0, sizeof(struct mdss_mdp_frc_seq_gen));
2105 gen->pos = -EBADSLT;
2106 gen->base = -1;
2107
2108 switch (cadence_id) {
2109 case FRC_CADENCE_22:
2110 cadence = cadence22;
2111 len = 2;
2112 break;
2113 case FRC_CADENCE_23:
2114 cadence = cadence23;
2115 len = 2;
2116 break;
2117 case FRC_CADENCE_23223:
2118 cadence = cadence23223;
2119 len = 5;
2120 break;
2121 default:
2122 break;
2123 }
2124
2125 if (len > 0) {
2126 memcpy(gen->seq, cadence, len * sizeof(int));
2127 gen->len = len;
2128 gen->retry = 0;
2129 }
2130
2131 pr_debug("init sequence, cadence=%d len=%d\n", cadence_id, len);
2132}
2133
/*
 * Find the starting offset of the cached repeat counts within the
 * pre-defined cadence sequence.
 *
 * The cache is compared, rotated by one (the (i+len-1) % len index), against
 * every rotation of gen->seq; the first fully matching rotation's position
 * is returned. After FRC_CADENCE_SEQUENCE_MAX_RETRY failed attempts the
 * default position 0 is used rather than matching forever.
 *
 * Returns the matched position (>= 0) or -EBADSLT when no rotation matches.
 */
static int __match_sequence(struct mdss_mdp_frc_seq_gen *gen)
{
	int pos, i;
	int len = gen->len;

	/* use default position if many attempts have failed */
	if (gen->retry++ >= FRC_CADENCE_SEQUENCE_MAX_RETRY)
		return 0;

	/* try each rotation of the sequence against the cached samples */
	for (pos = 0; pos < len; pos++) {
		for (i = 0; i < len; i++) {
			if (gen->cache[(i+len-1) % len]
					!= gen->seq[(pos+i) % len])
				break;
		}
		/* inner loop ran to completion: every element matched */
		if (i == len)
			return pos;
	}

	return -EBADSLT;
}
2155
2156static void __reset_cache(struct mdss_mdp_frc_seq_gen *gen)
2157{
2158 memset(gen->cache, 0, gen->len * sizeof(int));
2159 gen->base = -1;
2160}
2161
2162static void __cache_last(struct mdss_mdp_frc_seq_gen *gen, int expected_vsync)
2163{
2164 int i = 0;
2165
2166 /* only cache last in case of pre-defined cadence */
2167 if ((gen->pos < 0) && (gen->len > 0)) {
2168 /* set first sample's expected vsync as base */
2169 if (gen->base < 0) {
2170 gen->base = expected_vsync;
2171 return;
2172 }
2173
2174 /* cache is 0 if not filled */
2175 while (gen->cache[i] && (i < gen->len))
2176 i++;
2177
2178 gen->cache[i] = expected_vsync - gen->base;
2179 gen->base = expected_vsync;
2180
2181 if (i == (gen->len - 1)) {
2182 /* find init pos in sequence when cache is full */
2183 gen->pos = __match_sequence(gen);
2184 /* reset cache and re-collect samples for matching */
2185 if (gen->pos < 0)
2186 __reset_cache(gen);
2187 }
2188 }
2189}
2190
2191static inline bool __is_seq_gen_matched(struct mdss_mdp_frc_seq_gen *gen)
2192{
2193 return (gen->len > 0) && (gen->pos >= 0);
2194}
2195
2196static int __expected_repeat(struct mdss_mdp_frc_seq_gen *gen)
2197{
2198 int next_repeat = -1;
2199
2200 if (__is_seq_gen_matched(gen)) {
2201 next_repeat = gen->seq[gen->pos];
2202 gen->pos = (gen->pos + 1) % gen->len;
2203 }
2204
2205 return next_repeat;
2206}
2207
2208static bool __is_display_fps_changed(struct msm_fb_data_type *mfd,
2209 struct mdss_mdp_frc_info *frc_info)
2210{
2211 bool display_fps_changed = false;
2212 u32 display_fp1000s = mdss_panel_get_framerate(mfd->panel_info,
2213 FPS_RESOLUTION_KHZ);
2214
2215 if (frc_info->display_fp1000s != display_fp1000s) {
2216 pr_debug("fps changes from %d to %d\n",
2217 frc_info->display_fp1000s, display_fp1000s);
2218 display_fps_changed = true;
2219 }
2220
2221 return display_fps_changed;
2222}
2223
/*
 * Detect a video frame-rate change by comparing the elapsed time of the
 * current FRC_VIDEO_FPS_DETECT_WINDOW frames against the previous window.
 * Updates frc_info->video_stat (frame_cnt/timestamp/last_delta) once per
 * window, so this helper has side effects and must be called on every frame.
 */
static bool __is_video_fps_changed(struct mdss_mdp_frc_info *frc_info)
{
	bool video_fps_changed = false;

	/* only evaluate once a full detection window has elapsed */
	if ((frc_info->cur_frc.frame_cnt - frc_info->video_stat.frame_cnt)
			== FRC_VIDEO_FPS_DETECT_WINDOW) {
		s64 delta_t = frc_info->cur_frc.timestamp -
			frc_info->video_stat.timestamp;

		/* last_delta == 0 means no previous window to compare with */
		if (frc_info->video_stat.last_delta) {
			video_fps_changed =
				abs(delta_t - frc_info->video_stat.last_delta)
				> (FRC_VIDEO_FPS_CHANGE_THRESHOLD_US *
					FRC_VIDEO_FPS_DETECT_WINDOW);

			if (video_fps_changed)
				pr_info("video fps changed from [%d]%lld to [%d]%lld\n",
						frc_info->video_stat.frame_cnt,
						frc_info->video_stat.last_delta,
						frc_info->cur_frc.frame_cnt,
						delta_t);
		}

		/* roll the window forward */
		frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
		frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
		frc_info->video_stat.last_delta = delta_t;
	}

	return video_fps_changed;
}
2254
2255static bool __is_video_seeking(struct mdss_mdp_frc_info *frc_info)
2256{
2257 s64 ts_diff =
2258 frc_info->cur_frc.timestamp - frc_info->last_frc.timestamp;
2259 bool video_seek = false;
2260
2261 video_seek = (ts_diff < 0)
2262 || (ts_diff > FRC_VIDEO_TS_DELTA_THRESHOLD_US);
2263
2264 if (video_seek)
2265 pr_debug("video seeking: %lld -> %lld\n",
2266 frc_info->last_frc.timestamp,
2267 frc_info->cur_frc.timestamp);
2268
2269 return video_seek;
2270}
2271
2272static bool __is_buffer_dropped(struct mdss_mdp_frc_info *frc_info)
2273{
2274 int buffer_drop_cnt
2275 = frc_info->cur_frc.frame_cnt - frc_info->last_frc.frame_cnt;
2276
2277 if (buffer_drop_cnt > 1) {
2278 struct mdss_mdp_frc_drop_stat *drop_stat = &frc_info->drop_stat;
2279
2280 /* collect dropping statistics */
2281 if (!drop_stat->drop_cnt)
2282 drop_stat->frame_cnt = frc_info->last_frc.frame_cnt;
2283
2284 drop_stat->drop_cnt++;
2285
2286 pr_info("video buffer drop from %d to %d\n",
2287 frc_info->last_frc.frame_cnt,
2288 frc_info->cur_frc.frame_cnt);
2289 }
2290 return buffer_drop_cnt > 1;
2291}
2292
2293static bool __is_too_many_drops(struct mdss_mdp_frc_info *frc_info)
2294{
2295 struct mdss_mdp_frc_drop_stat *drop_stat = &frc_info->drop_stat;
2296 bool too_many = false;
2297
2298 if (drop_stat->drop_cnt > FRC_MAX_VIDEO_DROPPING_CNT) {
2299 too_many = (frc_info->cur_frc.frame_cnt - drop_stat->frame_cnt
2300 < FRC_VIDEO_DROP_TOLERANCE_WINDOW);
2301 frc_info->drop_stat.drop_cnt = 0;
2302 }
2303
2304 return too_many;
2305}
2306
2307static bool __is_video_cnt_rollback(struct mdss_mdp_frc_info *frc_info)
2308{
2309 /* video frame_cnt is assumed to increase monotonically */
2310 bool video_rollback
2311 = (frc_info->cur_frc.frame_cnt < frc_info->last_frc.frame_cnt)
2312 || (frc_info->cur_frc.frame_cnt <
2313 frc_info->base_frc.frame_cnt);
2314
2315 if (video_rollback)
2316 pr_info("video frame_cnt rolls back from %d to %d\n",
2317 frc_info->last_frc.frame_cnt,
2318 frc_info->cur_frc.frame_cnt);
2319
2320 return video_rollback;
2321}
2322
2323static bool __is_video_pause(struct msm_fb_data_type *mfd,
2324 struct mdss_mdp_frc_info *frc_info)
2325{
2326 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2327 bool video_pause =
2328 (frc_info->cur_frc.frame_cnt - frc_info->last_frc.frame_cnt
2329 == 1)
2330 && (ctl->vsync_cnt - frc_info->last_vsync_cnt >
2331 FRC_VIDEO_PAUSE_THRESHOLD);
2332
2333 if (video_pause)
2334 pr_debug("video paused: vsync elapsed %d\n",
2335 ctl->vsync_cnt - frc_info->last_vsync_cnt);
2336
2337 return video_pause;
2338}
2339
2340/*
2341 * Workaround for some cases that video has the same timestamp for
2342 * different frame. E.g., video player might provide the same frame
2343 * twice to codec when seeking/flushing.
2344 */
2345static bool __is_timestamp_duplicated(struct mdss_mdp_frc_info *frc_info)
2346{
2347 bool ts_dup =
2348 (frc_info->cur_frc.frame_cnt != frc_info->last_frc.frame_cnt)
2349 && (frc_info->cur_frc.timestamp
2350 == frc_info->last_frc.timestamp);
2351
2352 if (ts_dup)
2353 pr_info("timestamp of frame %d and %d are duplicated\n",
2354 frc_info->last_frc.frame_cnt,
2355 frc_info->cur_frc.frame_cnt);
2356
2357 return ts_dup;
2358}
2359
2360static void __set_frc_base(struct msm_fb_data_type *mfd,
2361 struct mdss_mdp_frc_info *frc_info)
2362{
2363 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2364
2365 frc_info->base_vsync_cnt = ctl->vsync_cnt;
2366 frc_info->base_frc = frc_info->cur_frc;
2367 frc_info->last_frc = frc_info->cur_frc;
2368 frc_info->last_repeat = 0;
2369 frc_info->last_vsync_cnt = 0;
2370 frc_info->cadence_id = FRC_CADENCE_NONE;
2371 frc_info->video_stat.last_delta = 0;
2372 frc_info->video_stat.frame_cnt = frc_info->cur_frc.frame_cnt;
2373 frc_info->video_stat.timestamp = frc_info->cur_frc.timestamp;
2374 frc_info->display_fp1000s =
2375 mdss_panel_get_framerate(mfd->panel_info, FPS_RESOLUTION_KHZ);
2376
2377
2378 pr_debug("frc_base: vsync_cnt=%d frame_cnt=%d timestamp=%lld\n",
2379 frc_info->base_vsync_cnt, frc_info->cur_frc.frame_cnt,
2380 frc_info->cur_frc.timestamp);
2381}
2382
2383/* calculate when we'd like to kickoff current frame based on its timestamp */
2384static int __calculate_remaining_vsync(struct msm_fb_data_type *mfd,
2385 struct mdss_mdp_frc_info *frc_info)
2386{
2387 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2388 struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
2389 struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
2390 int vsync_diff, expected_vsync_cnt, remaining_vsync;
2391
2392 /* how many vsync intervals between current & base */
2393 vsync_diff = __compute_vsync_diff(cur_frc->timestamp,
2394 base_frc->timestamp, frc_info->display_fp1000s);
2395
2396 /* expected vsync where we'd like to kickoff current frame */
2397 expected_vsync_cnt = frc_info->base_vsync_cnt + vsync_diff;
2398 /* how many remaining vsync we need display till kickoff */
2399 remaining_vsync = expected_vsync_cnt - ctl->vsync_cnt;
2400
2401 pr_debug("frc: expected_vsync_cnt=%d, cur_vsync_cnt=%d, remaining=%d\n",
2402 expected_vsync_cnt, ctl->vsync_cnt, remaining_vsync);
2403
2404 return remaining_vsync;
2405}
2406
/* tune latency computed previously if possible jitter exists */
static int __tune_possible_jitter(struct msm_fb_data_type *mfd,
	struct mdss_mdp_frc_info *frc_info, int remaining_vsync)
{
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	int cadence_id = frc_info->cadence_id;
	int remaining = remaining_vsync;
	/* next repeat count dictated by the matched cadence sequence (-1 if none) */
	int expected_repeat = __expected_repeat(&frc_info->gen);

	/* only adjust once a cadence is set and a sequence has been matched */
	if (cadence_id && (expected_repeat > 0)) {
		int expected_vsync_cnt = remaining + ctl->vsync_cnt;
		/* how many times current frame will be repeated */
		int cur_repeat = expected_vsync_cnt - frc_info->last_vsync_cnt;

		/*
		 * Pull the kickoff in (or push it out) so the previous
		 * frame's repeat count stays on the cadence sequence despite
		 * timestamp jitter.
		 */
		remaining -= cur_repeat - expected_repeat;
		pr_debug("frc: tune vsync, input=%d, output=%d, last_repeat=%d, cur_repeat=%d, expected_repeat=%d\n",
				remaining_vsync, remaining, frc_info->last_repeat,
				cur_repeat, expected_repeat);
	}

	return remaining;
}
2429
/* compute how many vsync we still need to wait for keeping cadence */
static int __calculate_remaining_repeat(struct msm_fb_data_type *mfd,
	struct mdss_mdp_frc_info *frc_info)
{
	return __tune_possible_jitter(mfd, frc_info,
			__calculate_remaining_vsync(mfd, frc_info));
}
2441
2442static int __repeat_current_frame(struct mdss_mdp_ctl *ctl, int repeat)
2443{
2444 int expected_vsync = ctl->vsync_cnt + repeat;
2445 int cnt = 0;
2446 int ret = 0;
2447
2448 while (ctl->vsync_cnt < expected_vsync) {
2449 cnt++;
2450 if (ctl->ops.wait_vsync_fnc) {
2451 ret = ctl->ops.wait_vsync_fnc(ctl);
2452 if (ret < 0)
2453 break;
2454 }
2455 }
2456
2457 if (ret)
2458 pr_err("wrong waiting: repeat %d, actual: %d\n", repeat, cnt);
2459
2460 return ret;
2461}
2462
2463static void __save_last_frc_info(struct mdss_mdp_ctl *ctl,
2464 struct mdss_mdp_frc_info *frc_info)
2465{
2466 /* save last data */
2467 frc_info->last_frc = frc_info->cur_frc;
2468 frc_info->last_repeat = ctl->vsync_cnt - frc_info->last_vsync_cnt;
2469 frc_info->last_vsync_cnt = ctl->vsync_cnt;
2470}
2471
2472static void cadence_detect_callback(struct mdss_mdp_frc_fsm *frc_fsm)
2473{
2474 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2475
2476 __init_cadence_calc(&frc_info->calc);
2477}
2478
2479static void seq_match_callback(struct mdss_mdp_frc_fsm *frc_fsm)
2480{
2481 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2482
2483 __init_seq_gen(&frc_info->gen, frc_info->cadence_id);
2484}
2485
2486static void frc_disable_callback(struct mdss_mdp_frc_fsm *frc_fsm)
2487{
2488 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2489
2490 frc_info->cadence_id = FRC_CADENCE_DISABLE;
2491}
2492
/* default behavior of FRC FSM */
static bool __is_frc_state_changed_in_default(struct msm_fb_data_type *mfd,
	struct mdss_mdp_frc_info *frc_info)
{
	/*
	 * Need change to INIT state in case of any of these changes:
	 *
	 * 1) video frame_cnt has been rolled back by codec.
	 * 2) video fast-foward or rewind. Sometimes video seeking might cause
	 *    buffer drop as well, so check seek ahead of buffer drop in order
	 *    to avoid duplicated check.
	 * 3) buffer drop.
	 * 4) display fps has changed.
	 * 5) video frame rate has changed.
	 * 6) video pauses. it could be considered as lag case.
	 * 7) duplicated timestamp of different frames which breaks FRC.
	 *
	 * NOTE: several helpers below update statistics as a side effect
	 * (__is_buffer_dropped, __is_video_fps_changed), so the evaluation
	 * order and short-circuiting of this expression are significant.
	 */
	return (__is_video_cnt_rollback(frc_info) ||
		__is_video_seeking(frc_info) ||
		__is_buffer_dropped(frc_info) ||
		__is_display_fps_changed(mfd, frc_info) ||
		__is_video_fps_changed(frc_info) ||
		__is_video_pause(mfd, frc_info) ||
		__is_timestamp_duplicated(frc_info));
}
2518
2519static void __pre_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
2520{
2521 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2522 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2523
2524 if (__is_too_many_drops(frc_info)) {
2525 /*
2526 * disable frc when dropping too many buffers, this might happen
2527 * in some extreme cases like video is heavily loaded so any
2528 * extra latency could make things worse.
2529 */
2530 pr_info("disable frc because there're too many drops\n");
2531 mdss_mdp_frc_fsm_change_state(frc_fsm,
2532 FRC_STATE_DISABLE, frc_disable_callback);
2533 mdss_mdp_frc_fsm_update_state(frc_fsm);
2534 } else if (__is_frc_state_changed_in_default(mfd, frc_info)) {
2535 /* FRC status changed so reset to INIT state */
2536 mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
2537 mdss_mdp_frc_fsm_update_state(frc_fsm);
2538 }
2539}
2540
/* no per-frame work in this state; only the shared pre/post hooks run */
static void __do_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
{
	/* do nothing */
}
2545
2546static void __post_frc_in_default(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
2547{
2548 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2549 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
2550 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2551
2552 __save_last_frc_info(ctl, frc_info);
2553
2554 /* update frc_fsm state to new state for the next round */
2555 mdss_mdp_frc_fsm_update_state(frc_fsm);
2556}
2557
2558/* behavior of FRC FSM in INIT state */
2559static void __do_frc_in_init_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
2560{
2561 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2562 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2563
2564 __set_frc_base(mfd, frc_info);
2565
2566 mdss_mdp_frc_fsm_change_state(frc_fsm,
2567 FRC_STATE_CADENCE_DETECT, cadence_detect_callback);
2568}
2569
/* behavior of FRC FSM in CADENCE_DETECT state */
static void __do_frc_in_cadence_detect_state(struct mdss_mdp_frc_fsm *frc_fsm,
	void *arg)
{
	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
	struct mdss_mdp_frc_cadence_calc *calc = &frc_info->calc;

	/* keep collecting frames until the detection window is full */
	if (calc->sample_cnt < FRC_CADENCE_DETECT_WINDOW) {
		calc->samples[calc->sample_cnt++] = frc_info->cur_frc;
	} else {
		/*
		 * Get enough samples and check candence. FRC_CADENCE_23
		 * and FRC_CADENCE_22 need >= 2 deltas, and >= 5 deltas
		 * are necessary for computing FRC_CADENCE_23223.
		 */
		u32 cadence_id = FRC_CADENCE_23;
		/*
		 * Number of samples to use for each candidate, indexed by
		 * cadence id (index 0 = FRC_CADENCE_NONE is unused).
		 */
		u32 sample_cnt[FRC_MAX_SUPPORT_CADENCE] = {0, 5, 5, 6};

		/* try each candidate cadence until the ratio check agrees */
		while (cadence_id < FRC_CADENCE_FREE_RUN) {
			if (cadence_id ==
				__calculate_cadence_id(frc_info,
					sample_cnt[cadence_id]))
				break;
			cadence_id++;
		}

		frc_info->cadence_id = cadence_id;
		pr_info("frc: cadence_id=%d\n", cadence_id);

		/* detected supported cadence, start sequence match */
		if (__is_supported_candence(frc_info->cadence_id))
			mdss_mdp_frc_fsm_change_state(frc_fsm,
				FRC_STATE_SEQ_MATCH, seq_match_callback);
		else
			mdss_mdp_frc_fsm_change_state(frc_fsm,
				FRC_STATE_FREERUN, NULL);
	}
}
2608
2609/* behavior of FRC FSM in SEQ_MATCH state */
2610static void __do_frc_in_seq_match_state(struct mdss_mdp_frc_fsm *frc_fsm,
2611 void *arg)
2612{
2613 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2614 struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;
2615 struct mdss_mdp_frc_data *base_frc = &frc_info->base_frc;
2616 int vsync_diff;
2617
2618 /* how many vsync intervals between current & base */
2619 vsync_diff = __compute_vsync_diff(cur_frc->timestamp,
2620 base_frc->timestamp, frc_info->display_fp1000s);
2621
2622 /* cache vsync diff to compute start pos in cadence */
2623 __cache_last(&frc_info->gen, vsync_diff);
2624
2625 if (__is_seq_gen_matched(&frc_info->gen))
2626 mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_READY, NULL);
2627}
2628
2629/* behavior of FRC FSM in FREE_RUN state */
2630static bool __is_frc_state_changed_in_freerun_state(
2631 struct msm_fb_data_type *mfd,
2632 struct mdss_mdp_frc_info *frc_info)
2633{
2634 /*
2635 * Only need change to INIT state in case of 2 changes:
2636 *
2637 * 1) display fps has changed.
2638 * 2) video frame rate has changed.
2639 */
2640 return (__is_display_fps_changed(mfd, frc_info) ||
2641 __is_video_fps_changed(frc_info));
2642}
2643
2644static void __pre_frc_in_freerun_state(struct mdss_mdp_frc_fsm *frc_fsm,
2645 void *arg)
2646{
2647 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
2648 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2649
2650 /* FRC status changed so reset to INIT state */
2651 if (__is_frc_state_changed_in_freerun_state(mfd, frc_info)) {
2652 /* update state to INIT immediately */
2653 mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
2654 mdss_mdp_frc_fsm_update_state(frc_fsm);
2655 }
2656}
2657
/* behavior of FRC FSM in READY state */
static void __do_frc_in_ready_state(struct mdss_mdp_frc_fsm *frc_fsm, void *arg)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)arg;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
	struct mdss_mdp_frc_data *cur_frc = &frc_info->cur_frc;

	/* vsyncs to wait before kicking off, tuned to the matched cadence */
	int remaining_repeat =
		__calculate_remaining_repeat(mfd, frc_info);

	mdss_debug_frc_add_kickoff_sample_pre(ctl, frc_info, remaining_repeat);

	/* video arrives later than expected: re-init, kick off immediately */
	if (remaining_repeat < 0) {
		pr_info("Frame %d lags behind %d vsync\n",
				cur_frc->frame_cnt, -remaining_repeat);
		mdss_mdp_frc_fsm_change_state(frc_fsm, FRC_STATE_INIT, NULL);
		remaining_repeat = 0;
	}

	/* debugfs knob can force frame repeating off */
	if (mdss_debug_frc_frame_repeat_disabled())
		remaining_repeat = 0;

	/* block until the computed number of vsyncs has elapsed */
	__repeat_current_frame(ctl, remaining_repeat);

	mdss_debug_frc_add_kickoff_sample_post(ctl, frc_info, remaining_repeat);
}
2686
/* behavior of FRC FSM in DISABLE state */
/* intentionally empty: no drop/seek/fps checks run while FRC is disabled */
static void __pre_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
	void *arg)
{
	/* do nothing */
}
2693
/* intentionally empty: no frame bookkeeping while FRC is disabled */
static void __post_frc_in_disable_state(struct mdss_mdp_frc_fsm *frc_fsm,
	void *arg)
{
	/* do nothing */
}
2699
/*
 * Enable or disable the secure display session according to the pending
 * sd_transition_state. sd_enabled is updated only when the control call
 * succeeds. Returns 0 on success or the error from
 * mdss_mdp_secure_display_ctrl().
 */
static int __config_secure_display(struct mdss_overlay_private *mdp5_data)
{
	int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
	int sd_enable = -1; /* Since 0 is a valid state, initialize with -1 */
	int ret = 0;

	/* ensure the previous command-mode frame transfer has finished */
	if (panel_type == MIPI_CMD_PANEL)
		mdss_mdp_display_wait4pingpong(mdp5_data->ctl, true);

	/*
	 * Start secure display session if we are transitioning from non secure
	 * to secure display.
	 */
	if (mdp5_data->sd_transition_state ==
			SD_TRANSITION_NON_SECURE_TO_SECURE)
		sd_enable = 1;

	/*
	 * For command mode panels, if we are trasitioning from secure to
	 * non secure session, disable the secure display, as we've already
	 * waited for the previous frame transfer.
	 */
	if ((panel_type == MIPI_CMD_PANEL) &&
			(mdp5_data->sd_transition_state ==
			 SD_TRANSITION_SECURE_TO_NON_SECURE))
		sd_enable = 0;

	/* -1 means no state change requested for this panel/transition */
	if (sd_enable != -1) {
		ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, sd_enable);
		if (!ret)
			mdp5_data->sd_enabled = sd_enable;
	}

	return ret;
}
2735
/*
 * predefined state table of FRC FSM, indexed by enum mdss_mdp_frc_state_type.
 * Each state supplies a pre/do/post hook triple invoked once per frame by
 * mdss_mdp_overlay_update_frc().
 */
static struct mdss_mdp_frc_fsm_state frc_fsm_states[FRC_STATE_MAX] = {
	/* INIT: capture a new timing base, then move to cadence detection */
	{
		.name = "FRC_FSM_INIT",
		.state = FRC_STATE_INIT,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_init_state,
			.post_frc = __post_frc_in_default,
		},
	},

	/* CADENCE_DETECT: sample frames to identify the pulldown cadence */
	{
		.name = "FRC_FSM_CADENCE_DETECT",
		.state = FRC_STATE_CADENCE_DETECT,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_cadence_detect_state,
			.post_frc = __post_frc_in_default,
		},
	},

	/* SEQ_MATCH: align with the repeat sequence of the found cadence */
	{
		.name = "FRC_FSM_SEQ_MATCH",
		.state = FRC_STATE_SEQ_MATCH,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_seq_match_state,
			.post_frc = __post_frc_in_default,
		},
	},

	/* FREERUN: unsupported cadence; commit frames without repeating */
	{
		.name = "FRC_FSM_FREERUN",
		.state = FRC_STATE_FREERUN,
		.ops = {
			.pre_frc = __pre_frc_in_freerun_state,
			.do_frc = __do_frc_in_default,
			.post_frc = __post_frc_in_default,
		},
	},

	/* READY: cadence locked; delay kickoffs to keep the cadence */
	{
		.name = "FRC_FSM_READY",
		.state = FRC_STATE_READY,
		.ops = {
			.pre_frc = __pre_frc_in_default,
			.do_frc = __do_frc_in_ready_state,
			.post_frc = __post_frc_in_default,
		},
	},

	/* DISABLE: FRC turned off (e.g. too many drops); all hooks no-op */
	{
		.name = "FRC_FSM_DISABLE",
		.state = FRC_STATE_DISABLE,
		.ops = {
			.pre_frc = __pre_frc_in_disable_state,
			.do_frc = __do_frc_in_default,
			.post_frc = __post_frc_in_disable_state,
		},
	},
};
2798
2799/*
2800 * FRC FSM operations:
2801 * mdss_mdp_frc_fsm_init_state: Init FSM state.
2802 * mdss_mdp_frc_fsm_change_state: Change FSM state. The desired state will not
2803 * be effective till update_state is called.
2804 * mdss_mdp_frc_fsm_update_state: Update FSM state. Changed state is effective
2805 * immediately once this function is called.
2806 */
2807void mdss_mdp_frc_fsm_init_state(struct mdss_mdp_frc_fsm *frc_fsm)
2808{
2809 pr_debug("frc_fsm: init frc fsm state\n");
2810 frc_fsm->state = frc_fsm->to_state = frc_fsm_states[FRC_STATE_INIT];
2811 memset(&frc_fsm->frc_info, 0, sizeof(struct mdss_mdp_frc_info));
2812}
2813
2814void mdss_mdp_frc_fsm_change_state(struct mdss_mdp_frc_fsm *frc_fsm,
2815 enum mdss_mdp_frc_state_type state,
2816 void (*cb)(struct mdss_mdp_frc_fsm *frc_fsm))
2817{
2818 if (state != frc_fsm->state.state) {
2819 pr_debug("frc_fsm: state changes from %s to %s\n",
2820 frc_fsm->state.name,
2821 frc_fsm_states[state].name);
2822 frc_fsm->to_state = frc_fsm_states[state];
2823 frc_fsm->cbs.update_state_cb = cb;
2824 }
2825}
2826
2827void mdss_mdp_frc_fsm_update_state(struct mdss_mdp_frc_fsm *frc_fsm)
2828{
2829 if (frc_fsm->to_state.state != frc_fsm->state.state) {
2830 pr_debug("frc_fsm: state updates from %s to %s\n",
2831 frc_fsm->state.name,
2832 frc_fsm->to_state.name);
2833
2834 if (frc_fsm->cbs.update_state_cb)
2835 frc_fsm->cbs.update_state_cb(frc_fsm);
2836
2837 frc_fsm->state = frc_fsm->to_state;
2838 }
2839}
2840
2841static void mdss_mdp_overlay_update_frc(struct msm_fb_data_type *mfd)
2842{
2843 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
2844 struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;
2845 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2846
2847 if (__validate_frc_info(frc_info)) {
2848 struct mdss_mdp_frc_fsm_state *state = &frc_fsm->state;
2849
2850 state->ops.pre_frc(frc_fsm, mfd);
2851 state->ops.do_frc(frc_fsm, mfd);
2852 state->ops.post_frc(frc_fsm, mfd);
2853 }
2854}
2855
/*
 * mdss_mdp_overlay_kickoff() - commit all staged overlay pipes to the display
 * @mfd:  framebuffer device being committed
 * @data: commit request carrying the ROI; may be NULL (kernel-driven commit)
 *
 * Serializes against concurrent commits via ov_lock (and ctl->shared_lock
 * when present), programs the staged pipes, triggers the display/writeback
 * commit, then waits for completion and performs cleanup. On any failure
 * after the clock vote, control jumps to commit_fail so clocks, locks and
 * the destroy list are always unwound.
 *
 * Returns 0 on success or a negative error code.
 */
int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
	struct mdp_display_commit *data)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe, *tmp;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	int ret = 0;
	struct mdss_mdp_commit_cb commit_cb;
	u8 sd_transition_state = 0;

	if (!ctl || !ctl->mixer_left)
		return -ENODEV;

	ATRACE_BEGIN(__func__);
	if (ctl->shared_lock) {
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
		mutex_lock(ctl->shared_lock);
	}

	mutex_lock(&mdp5_data->ov_lock);
	ctl->bw_pending = 0;
	ret = mdss_mdp_overlay_start(mfd);
	if (ret) {
		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
		mutex_unlock(&mdp5_data->ov_lock);
		if (ctl->shared_lock)
			mutex_unlock(ctl->shared_lock);
		return ret;
	}

	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("iommu attach failed rc=%d\n", ret);
		mutex_unlock(&mdp5_data->ov_lock);
		if (ctl->shared_lock)
			mutex_unlock(ctl->shared_lock);
		return ret;
	}
	mutex_lock(&mdp5_data->list_lock);

	/* shared-lock path already sent FRAME_BEGIN above */
	if (!ctl->shared_lock)
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	mdss_mdp_check_ctl_reset_status(ctl);
	__validate_and_set_roi(mfd, data);

	if (ctl->ops.wait_pingpong && mdp5_data->mdata->serialize_wait4pp)
		mdss_mdp_display_wait4pingpong(ctl, true);

	/* snapshot the transition; it may be consumed again after wait4comp */
	sd_transition_state = mdp5_data->sd_transition_state;
	if (sd_transition_state != SD_TRANSITION_NONE) {
		ret = __config_secure_display(mdp5_data);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_err("Secure session config failed\n");
			goto commit_fail;
		}
	}

	/* kernel-initiated commit: account for it in the sync-pt counter */
	if (!data) {
		atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
		MDSS_XLOG(atomic_read(&mfd->mdp_sync_pt_data.commit_cnt));
	}

	/*
	 * Setup pipe in solid fill before unstaging,
	 * to ensure no fetches are happening after dettach or reattach.
	 */
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
		pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
		list_move(&pipe->list, &mdp5_data->pipes_destroy);
	}

	/* call this function before any registers programming */
	if (ctl->ops.pre_programming)
		ctl->ops.pre_programming(ctl);

	ATRACE_BEGIN("sspp_programming");
	ret = __overlay_queue_pipes(mfd);
	ATRACE_END("sspp_programming");
	mutex_unlock(&mdp5_data->list_lock);

	mdp5_data->kickoff_released = false;

	/* FRC may block here repeating the previous frame to hold cadence */
	if (mdp5_data->frc_fsm->enable)
		mdss_mdp_overlay_update_frc(mfd);

	if (mfd->panel.type == WRITEBACK_PANEL) {
		ATRACE_BEGIN("wb_kickoff");
		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
		commit_cb.data = mfd;
		ret = mdss_mdp_wfd_kickoff(mdp5_data->wfd, &commit_cb);
		ATRACE_END("wb_kickoff");
	} else {
		ATRACE_BEGIN("display_commit");
		commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
		commit_cb.data = mfd;
		ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL,
			&commit_cb);
		ATRACE_END("display_commit");
	}
	__vsync_set_vsync_handler(mfd);

	/*
	 * release the commit pending flag; we are releasing this flag
	 * after the commit, since now the transaction status
	 * in the cmd mode controllers is busy.
	 */
	mfd->atomic_commit_pending = false;

	if (!mdp5_data->kickoff_released)
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);

	if (IS_ERR_VALUE((unsigned long)ret))
		goto commit_fail;

	mutex_unlock(&mdp5_data->ov_lock);
	mdss_mdp_overlay_update_pm(mdp5_data);

	ATRACE_BEGIN("display_wait4comp");
	ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
	ATRACE_END("display_wait4comp");
	mdss_mdp_splash_cleanup(mfd, true);

	/*
	 * Configure Timing Engine, if new fps was set.
	 * We need to do this after the wait for vsync
	 * to guarantee that mdp flush bit and dsi flush
	 * bit are set within the same vsync period
	 * regardless of mdp revision.
	 */
	ATRACE_BEGIN("fps_update");
	ret = mdss_mdp_ctl_update_fps(ctl);
	ATRACE_END("fps_update");

	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("failed to update fps!\n");
		goto commit_fail;
	}

	mutex_lock(&mdp5_data->ov_lock);
	/*
	 * If we are transitioning from secure to non-secure display,
	 * disable the secure display.
	 */
	if (mdp5_data->sd_enabled && (sd_transition_state ==
			SD_TRANSITION_SECURE_TO_NON_SECURE)) {
		ret = mdss_mdp_secure_display_ctrl(mdp5_data->mdata, 0);
		if (!ret)
			mdp5_data->sd_enabled = 0;
	}

	mdss_fb_update_notify_update(mfd);
commit_fail:
	/* common unwind: cleanup pipes, drop clock vote, release locks */
	ATRACE_BEGIN("overlay_cleanup");
	mdss_mdp_overlay_cleanup(mfd, &mdp5_data->pipes_destroy);
	ATRACE_END("overlay_cleanup");
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
	if (!mdp5_data->kickoff_released)
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CTX_DONE);

	mutex_unlock(&mdp5_data->ov_lock);
	if (ctl->shared_lock)
		mutex_unlock(ctl->shared_lock);
	mdss_iommu_ctrl(0);
	ATRACE_END(__func__);

	return ret;
}
3029
/*
 * mdss_mdp_overlay_release() - queue the pipes selected by @ndx for cleanup
 * @mfd: framebuffer device owning the pipes
 * @ndx: bitmask of pipe indices (pipe->ndx bits) to release
 *
 * Moves each matching pipe from the pipes_used list to pipes_cleanup so a
 * later cleanup pass can free it.  Returns 0 when every requested bit was
 * matched, -ENOENT when some requested pipe was not found or could not be
 * mapped.
 */
int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
{
	struct mdss_mdp_pipe *pipe, *tmp;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	u32 unset_ndx = 0;

	mutex_lock(&mdp5_data->list_lock);
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
		if (pipe->ndx & ndx) {
			/*
			 * Map (reference) the pipe before touching it --
			 * presumably protects against a concurrent free;
			 * mirrors the unmap below.  TODO confirm semantics
			 * of mdss_mdp_pipe_map().
			 */
			if (mdss_mdp_pipe_map(pipe)) {
				pr_err("Unable to map used pipe%d ndx=%x\n",
						pipe->num, pipe->ndx);
				continue;
			}

			unset_ndx |= pipe->ndx;

			pipe->file = NULL;
			list_move(&pipe->list, &mdp5_data->pipes_cleanup);

			mdss_mdp_pipe_unmap(pipe);

			/* stop early once every requested bit is covered */
			if (unset_ndx == ndx)
				break;
		}
	}
	mutex_unlock(&mdp5_data->list_lock);

	if (unset_ndx != ndx) {
		pr_warn("Unable to unset pipe(s) ndx=0x%x unset=0x%x\n",
				ndx, unset_ndx);
		return -ENOENT;
	}

	return 0;
}
3066
3067static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
3068{
3069 int ret = 0;
3070 struct mdss_overlay_private *mdp5_data;
3071
3072 if (!mfd)
3073 return -ENODEV;
3074
3075 mdp5_data = mfd_to_mdp5_data(mfd);
3076
3077 if (!mdp5_data || !mdp5_data->ctl)
3078 return -ENODEV;
3079
3080 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
3081 if (ret)
3082 return ret;
3083
3084 if (ndx == BORDERFILL_NDX) {
3085 pr_debug("borderfill disable\n");
3086 mdp5_data->borderfill_enable = false;
3087 ret = 0;
3088 goto done;
3089 }
3090
3091 if (mdss_fb_is_power_off(mfd)) {
3092 ret = -EPERM;
3093 goto done;
3094 }
3095
3096 pr_debug("unset ndx=%x\n", ndx);
3097
3098 ret = mdss_mdp_overlay_release(mfd, ndx);
3099
3100done:
3101 mutex_unlock(&mdp5_data->ov_lock);
3102
3103 return ret;
3104}
3105
3106/**
3107 * mdss_mdp_overlay_release_all() - release any overlays associated with fb dev
3108 * @mfd: Msm frame buffer structure associated with fb device
3109 * @release_all: ignore pid and release all the pipes
3110 *
3111 * Release any resources allocated by calling process, this can be called
3112 * on fb_release to release any overlays/rotator sessions left open.
3113 *
3114 * Return number of resources released
3115 */
3116static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd,
3117 struct file *file)
3118{
3119 struct mdss_mdp_pipe *pipe, *tmp;
3120 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3121 u32 unset_ndx = 0;
3122 int cnt = 0;
3123
3124 pr_debug("releasing all resources for fb%d file:%pK\n",
3125 mfd->index, file);
3126
3127 mutex_lock(&mdp5_data->ov_lock);
3128 mutex_lock(&mdp5_data->list_lock);
3129 if (!mfd->ref_cnt && !list_empty(&mdp5_data->pipes_cleanup)) {
3130 pr_debug("fb%d:: free pipes present in cleanup list",
3131 mfd->index);
3132 cnt++;
3133 }
3134
3135 list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
3136 if (!file || pipe->file == file) {
3137 unset_ndx |= pipe->ndx;
3138 pipe->file = NULL;
3139 list_move(&pipe->list, &mdp5_data->pipes_cleanup);
3140 cnt++;
3141 }
3142 }
3143
3144 pr_debug("mfd->ref_cnt=%d unset_ndx=0x%x cnt=%d\n",
3145 mfd->ref_cnt, unset_ndx, cnt);
3146
3147 mutex_unlock(&mdp5_data->list_lock);
3148 mutex_unlock(&mdp5_data->ov_lock);
3149
3150 return cnt;
3151}
3152
/*
 * mdss_mdp_overlay_queue() - attach a userspace buffer to an overlay pipe
 * @mfd: framebuffer device
 * @req: overlay-play request; req->id selects the pipe, req->data the buffer
 *
 * Allocates a source-buffer slot for the pipe, then imports and validates
 * the user buffer into it.  On validation failure the slot is freed again.
 * Returns 0 on success or a negative errno.
 */
static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
	struct msmfb_overlay_data *req)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_data *src_data;
	struct mdp_layer_buffer buffer;
	int ret;
	u32 flags;

	pipe = __overlay_find_pipe(mfd, req->id);
	if (!pipe) {
		pr_err("pipe ndx=%x doesn't exist\n", req->id);
		return -ENODEV;
	}

	/* a dirty pipe is pending destroy/reconfig; refuse new buffers */
	if (pipe->dirty) {
		pr_warn("dirty pipe, will not queue pipe pnum=%d\n", pipe->num);
		return -ENODEV;
	}

	ret = mdss_mdp_pipe_map(pipe);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("Unable to map used pipe%d ndx=%x\n",
				pipe->num, pipe->ndx);
		return ret;
	}

	pr_debug("ov queue pnum=%d\n", pipe->num);

	if (pipe->flags & MDP_SOLID_FILL)
		pr_warn("Unexpected buffer queue to a solid fill pipe\n");

	/* carry only the secure-session flags into the buffer import */
	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
		MDP_SECURE_DISPLAY_OVERLAY_SESSION));

	mutex_lock(&mdp5_data->list_lock);
	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
	if (!src_data) {
		pr_err("unable to allocate source buffer\n");
		ret = -ENOMEM;
	} else {
		buffer.width = pipe->img_width;
		buffer.height = pipe->img_height;
		buffer.format = pipe->src_fmt->format;
		ret = mdss_mdp_data_get_and_validate_size(src_data, &req->data,
			1, flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
			&buffer);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			/* import failed: release the slot allocated above */
			mdss_mdp_overlay_buf_free(mfd, src_data);
			pr_err("src_data pmem error\n");
		}
	}
	mutex_unlock(&mdp5_data->list_lock);

	mdss_mdp_pipe_unmap(pipe);

	return ret;
}
3212
3213static int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
3214 struct msmfb_overlay_data *req)
3215{
3216 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3217 int ret = 0;
3218
3219 pr_debug("play req id=%x\n", req->id);
3220
3221 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
3222 if (ret)
3223 return ret;
3224
3225 if (mdss_fb_is_power_off(mfd)) {
3226 ret = -EPERM;
3227 goto done;
3228 }
3229
3230 if (req->id == BORDERFILL_NDX) {
3231 pr_debug("borderfill enable\n");
3232 mdp5_data->borderfill_enable = true;
3233 ret = mdss_mdp_overlay_free_fb_pipe(mfd);
3234 } else {
3235 ret = mdss_mdp_overlay_queue(mfd, req);
3236 }
3237
3238done:
3239 mutex_unlock(&mdp5_data->ov_lock);
3240
3241 return ret;
3242}
3243
3244static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd)
3245{
3246 struct mdss_mdp_pipe *pipe;
3247 u32 fb_ndx = 0;
3248 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3249
3250 pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
3251 MDSS_MDP_MIXER_MUX_LEFT, MDSS_MDP_STAGE_BASE, false);
3252 if (pipe)
3253 fb_ndx |= pipe->ndx;
3254
3255 pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
3256 MDSS_MDP_MIXER_MUX_RIGHT, MDSS_MDP_STAGE_BASE, false);
3257 if (pipe)
3258 fb_ndx |= pipe->ndx;
3259
3260 if (fb_ndx) {
3261 pr_debug("unstaging framebuffer pipes %x\n", fb_ndx);
3262 mdss_mdp_overlay_release(mfd, fb_ndx);
3263 }
3264 return 0;
3265}
3266
/*
 * mdss_mdp_overlay_get_fb_pipe() - find or create the base framebuffer pipe
 * @mfd: framebuffer device
 * @ppipe: out; receives the staged or newly set up base pipe
 * @mixer_mux: MDSS_MDP_MIXER_MUX_LEFT or MDSS_MDP_MIXER_MUX_RIGHT
 * @pipe_allocated: out; true when a new pipe was set up by this call
 *
 * Returns 0 on success, -ENODEV/-ENOMEM/-EINVAL or a pipe-setup errno on
 * failure.  The request struct is only a template and is freed before
 * returning.
 */
static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
					struct mdss_mdp_pipe **ppipe,
					int mixer_mux, bool *pipe_allocated)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_pipe *pipe;
	int ret = 0;
	struct mdp_overlay *req = NULL;

	*pipe_allocated = false;
	pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl, mixer_mux,
		MDSS_MDP_STAGE_BASE, false);

	if (pipe == NULL) {
		struct fb_info *fbi = mfd->fbi;
		struct mdss_mdp_mixer *mixer;
		int bpp;
		bool rotate_180 = (fbi->var.rotate == FB_ROTATE_UD);
		struct mdss_data_type *mdata = mfd_to_mdata(mfd);
		bool split_lm = (fbi->var.xres > mdata->max_mixer_width ||
			is_split_lm(mfd));
		struct mdp_rect left_rect, right_rect;

		mixer = mdss_mdp_mixer_get(mdp5_data->ctl,
			MDSS_MDP_MIXER_MUX_LEFT);
		if (!mixer) {
			pr_err("unable to retrieve mixer\n");
			return -ENODEV;
		}

		req = kcalloc(1, sizeof(struct mdp_overlay), GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		/* derive source geometry from the fbdev mode */
		bpp = fbi->var.bits_per_pixel / 8;
		req->id = MSMFB_NEW_REQUEST;
		req->src.format = mfd->fb_imgType;
		req->src.height = fbi->var.yres;
		req->src.width = fbi->fix.line_length / bpp;

		/* split the fb across the two mixers at mixer->width */
		left_rect.x = 0;
		left_rect.w = MIN(fbi->var.xres, mixer->width);
		left_rect.y = 0;
		left_rect.h = req->src.height;

		right_rect.x = mixer->width;
		right_rect.w = fbi->var.xres - mixer->width;
		right_rect.y = 0;
		right_rect.h = req->src.height;

		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
			if (req->src.width <= mixer->width) {
				pr_warn("right fb pipe not needed\n");
				ret = -EINVAL;
				goto done;
			}
			req->src_rect = req->dst_rect = right_rect;
			/* 180-degree flip swaps which half each LM reads */
			if (split_lm && rotate_180)
				req->src_rect = left_rect;
		} else {
			req->src_rect = req->dst_rect = left_rect;
			if (split_lm && rotate_180)
				req->src_rect = right_rect;
		}

		req->z_order = MDSS_MDP_STAGE_BASE;
		if (rotate_180)
			req->flags |= (MDP_FLIP_LR | MDP_FLIP_UD);

		pr_debug("allocating base pipe mux=%d\n", mixer_mux);

		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL,
			false);
		if (ret)
			goto done;

		*pipe_allocated = true;
	}
	pr_debug("ctl=%d pnum=%d\n", mdp5_data->ctl->num, pipe->num);

	*ppipe = pipe;

done:
	kfree(req);
	return ret;
}
3353
/*
 * mdss_mdp_overlay_pan_display() - legacy fbdev pan path
 * @mfd: framebuffer device
 *
 * Stages the fbdev memory as the base pipe(s) (left, plus right for wide /
 * split-LM panels), points them at the panned offset, and kicks off an
 * update.  Returns nothing; failures are logged and unwound via the error
 * labels at the bottom.
 */
static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_data *buf_l = NULL, *buf_r = NULL;
	struct mdss_mdp_pipe *l_pipe, *r_pipe, *pipe, *tmp;
	struct fb_info *fbi;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_data_type *mdata;
	u32 offset;
	int bpp, ret;
	bool l_pipe_allocated = false, r_pipe_allocated = false;

	if (!mfd || !mfd->mdp.private1)
		return;

	mdata = mfd_to_mdata(mfd);
	fbi = mfd->fbi;
	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl)
		return;

	/*
	 * Ignore writeback updates through pan_display as output
	 * buffer is not available.
	 */
	if (mfd->panel_info->type == WRITEBACK_PANEL) {
		pr_err_once("writeback update not supported through pan display\n");
		return;
	}

	/* no fb memory (or borderfill mode): just kick off whatever is staged */
	if (IS_ERR_OR_NULL(mfd->fbmem_buf) || fbi->fix.smem_len == 0 ||
		mdp5_data->borderfill_enable) {
		if (mdata->handoff_pending) {
			/*
			 * Move pipes to cleanup queue and avoid kickoff if
			 * pan display is called before handoff is completed.
			 */
			mutex_lock(&mdp5_data->list_lock);
			list_for_each_entry_safe(pipe, tmp,
				&mdp5_data->pipes_used, list) {
				list_move(&pipe->list,
					&mdp5_data->pipes_cleanup);
			}
			mutex_unlock(&mdp5_data->list_lock);
		}
		mfd->mdp.kickoff_fnc(mfd, NULL);
		return;
	}

	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
		return;

	/* allow updates while "off" only for cmd panels entering DCM */
	if ((mdss_fb_is_power_off(mfd)) &&
		!((mfd->dcm_state == DCM_ENTER) &&
		(mfd->panel.type == MIPI_CMD_PANEL))) {
		mutex_unlock(&mdp5_data->ov_lock);
		return;
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	/* byte offset of the panned region inside the fb memory */
	bpp = fbi->var.bits_per_pixel / 8;
	offset = fbi->var.xoffset * bpp +
		 fbi->var.yoffset * fbi->fix.line_length;

	if (offset > fbi->fix.smem_len) {
		pr_err("invalid fb offset=%u total length=%u\n",
			offset, fbi->fix.smem_len);
		goto clk_disable;
	}

	ret = mdss_mdp_overlay_get_fb_pipe(mfd, &l_pipe,
		MDSS_MDP_MIXER_MUX_LEFT, &l_pipe_allocated);
	if (ret) {
		pr_err("unable to allocate base pipe\n");
		/*
		 * NOTE(review): this path calls mdss_iommu_ctrl(0) before
		 * mdss_iommu_ctrl(1) has run -- verify the iommu refcount
		 * tolerates the imbalance.
		 */
		goto iommu_disable;
	}

	if (mdss_mdp_pipe_map(l_pipe)) {
		pr_err("unable to map base pipe\n");
		goto pipe_release;
	}

	ret = mdss_mdp_overlay_start(mfd);
	if (ret) {
		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
		/*
		 * NOTE(review): l_pipe stays mapped on this path -- confirm
		 * the reference taken by mdss_mdp_pipe_map() is reclaimed
		 * elsewhere.
		 */
		goto clk_disable;
	}

	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE((unsigned long)ret)) {
		pr_err("IOMMU attach failed\n");
		goto clk_disable;
	}

	buf_l = __mdp_overlay_buf_alloc(mfd, l_pipe);
	if (!buf_l) {
		pr_err("unable to allocate memory for fb buffer\n");
		mdss_mdp_pipe_unmap(l_pipe);
		goto pipe_release;
	}

	/* point the pipe at the fb dma-buf at the panned offset; no import */
	buf_l->p[0].srcp_table = mfd->fb_table;
	buf_l->p[0].srcp_dma_buf = mfd->fbmem_buf;
	buf_l->p[0].len = 0;
	buf_l->p[0].addr = 0;
	buf_l->p[0].offset = offset;
	buf_l->p[0].skip_detach = true;
	buf_l->p[0].mapped = false;
	buf_l->num_planes = 1;

	mdss_mdp_pipe_unmap(l_pipe);

	if (fbi->var.xres > mdata->max_pipe_width || is_split_lm(mfd)) {
		/*
		 * TODO: Need to revisit the function for panels with width more
		 * than max_pipe_width and less than max_mixer_width.
		 */
		ret = mdss_mdp_overlay_get_fb_pipe(mfd, &r_pipe,
			MDSS_MDP_MIXER_MUX_RIGHT, &r_pipe_allocated);
		if (ret) {
			pr_err("unable to allocate right base pipe\n");
			goto pipe_release;
		}

		if (mdss_mdp_pipe_map(r_pipe)) {
			pr_err("unable to map right base pipe\n");
			goto pipe_release;
		}

		buf_r = __mdp_overlay_buf_alloc(mfd, r_pipe);
		if (!buf_r) {
			pr_err("unable to allocate memory for fb buffer\n");
			mdss_mdp_pipe_unmap(r_pipe);
			goto pipe_release;
		}

		/* right pipe reads the same plane; src rect selects its half */
		buf_r->p[0] = buf_l->p[0];
		buf_r->num_planes = 1;

		mdss_mdp_pipe_unmap(r_pipe);
	}
	mutex_unlock(&mdp5_data->ov_lock);

	if ((fbi->var.activate & FB_ACTIVATE_VBL) ||
		(fbi->var.activate & FB_ACTIVATE_FORCE))
		mfd->mdp.kickoff_fnc(mfd, NULL);

	mdss_iommu_ctrl(0);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	return;

pipe_release:
	if (r_pipe_allocated)
		mdss_mdp_overlay_release(mfd, r_pipe->ndx);
	if (buf_l)
		__mdp_overlay_buf_free(mfd, buf_l);
	if (l_pipe_allocated)
		mdss_mdp_overlay_release(mfd, l_pipe->ndx);
iommu_disable:
	mdss_iommu_ctrl(0);
clk_disable:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	mutex_unlock(&mdp5_data->ov_lock);
}
3519
3520static void remove_underrun_vsync_handler(struct work_struct *work)
3521{
3522 int rc;
3523 struct mdss_mdp_ctl *ctl =
3524 container_of(work, typeof(*ctl), remove_underrun_handler);
3525
3526 if (!ctl || !ctl->ops.remove_vsync_handler) {
3527 pr_err("ctl or vsync handler is NULL\n");
3528 return;
3529 }
3530
3531 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
3532 rc = ctl->ops.remove_vsync_handler(ctl,
3533 &ctl->recover_underrun_handler);
3534 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
3535}
3536
3537static void mdss_mdp_recover_underrun_handler(struct mdss_mdp_ctl *ctl,
3538 ktime_t t)
3539{
3540 if (!ctl) {
3541 pr_err("ctl is NULL\n");
3542 return;
3543 }
3544
3545 mdss_mdp_ctl_reset(ctl, true);
3546 schedule_work(&ctl->remove_underrun_handler);
3547}
3548
/* do nothing in case of deterministic frame rate control, only keep vsync on */
static void mdss_mdp_overlay_frc_handler(struct mdss_mdp_ctl *ctl,
	ktime_t t)
{
	/* intentionally empty: registering this handler keeps vsync enabled */
	pr_debug("vsync on ctl%d vsync_cnt=%d\n", ctl->num, ctl->vsync_cnt);
}
3555
3556/* function is called in irq context should have minimum processing */
3557static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
3558 ktime_t t)
3559{
3560 struct msm_fb_data_type *mfd = NULL;
3561 struct mdss_overlay_private *mdp5_data = NULL;
3562
3563 if (!ctl) {
3564 pr_err("ctl is NULL\n");
3565 return;
3566 }
3567
3568 mfd = ctl->mfd;
3569 if (!mfd || !mfd->mdp.private1) {
3570 pr_warn("Invalid handle for vsync\n");
3571 return;
3572 }
3573
3574 mdp5_data = mfd_to_mdp5_data(mfd);
3575 if (!mdp5_data) {
3576 pr_err("mdp5_data is NULL\n");
3577 return;
3578 }
3579
3580 pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);
3581
3582 mdp5_data->vsync_time = t;
3583 sysfs_notify_dirent(mdp5_data->vsync_event_sd);
3584}
3585
3586/* function is called in irq context should have minimum processing */
3587static void mdss_mdp_overlay_handle_lineptr(struct mdss_mdp_ctl *ctl,
3588 ktime_t t)
3589{
3590 struct mdss_overlay_private *mdp5_data = NULL;
3591
3592 if (!ctl || !ctl->mfd) {
3593 pr_warn("Invalid handle for lineptr\n");
3594 return;
3595 }
3596
3597 mdp5_data = mfd_to_mdp5_data(ctl->mfd);
3598 if (!mdp5_data) {
3599 pr_err("mdp5_data is NULL\n");
3600 return;
3601 }
3602
3603 pr_debug("lineptr irq on fb%d play_cnt=%d\n",
3604 ctl->mfd->index, ctl->play_cnt);
3605
3606 mdp5_data->lineptr_time = t;
3607 sysfs_notify_dirent(mdp5_data->lineptr_event_sd);
3608}
3609
/*
 * mdss_mdp_overlay_vsync_ctrl() - enable or disable vsync event delivery
 * @mfd: framebuffer device
 * @en: nonzero to register the vsync handler, zero to remove it
 *
 * Returns 0 on success, -ENODEV without a ctl, -EOPNOTSUPP when the ctl
 * has no vsync ops, -EPERM before the first update powers the ctl (unless
 * continuous splash is still active), or the handler op's error code.
 */
int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	int rc;

	if (!ctl)
		return -ENODEV;

	mutex_lock(&mdp5_data->ov_lock);
	if (!ctl->ops.add_vsync_handler || !ctl->ops.remove_vsync_handler) {
		rc = -EOPNOTSUPP;
		pr_err_once("fb%d vsync handlers are not registered\n",
				mfd->index);
		goto end;
	}

	/* ctl must be powered, or splash must still own the display */
	if (!ctl->panel_data->panel_info.cont_splash_enabled &&
			!mdss_mdp_ctl_is_power_on(ctl)) {
		pr_debug("fb%d vsync pending first update en=%d, ctl power state:%d\n",
				mfd->index, en, ctl->power_state);
		rc = -EPERM;
		goto end;
	}

	pr_debug("fb%d vsync en=%d\n", mfd->index, en);

	/* hold MDP clocks on across the handler (un)registration */
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
	if (en)
		rc = ctl->ops.add_vsync_handler(ctl, &ctl->vsync_handler);
	else
		rc = ctl->ops.remove_vsync_handler(ctl, &ctl->vsync_handler);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);

end:
	mutex_unlock(&mdp5_data->ov_lock);
	return rc;
}
3648
3649static ssize_t dynamic_fps_sysfs_rda_dfps(struct device *dev,
3650 struct device_attribute *attr, char *buf)
3651{
3652 ssize_t ret;
3653 struct mdss_panel_data *pdata;
3654 struct fb_info *fbi = dev_get_drvdata(dev);
3655 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3656 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3657
3658 if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl))
3659 return 0;
3660
3661 pdata = dev_get_platdata(&mfd->pdev->dev);
3662 if (!pdata) {
3663 pr_err("no panel connected for fb%d\n", mfd->index);
3664 return -ENODEV;
3665 }
3666
3667 mutex_lock(&mdp5_data->dfps_lock);
3668 ret = snprintf(buf, PAGE_SIZE, "%d\n",
3669 pdata->panel_info.mipi.frame_rate);
3670 pr_debug("%s: '%d'\n", __func__,
3671 pdata->panel_info.mipi.frame_rate);
3672 mutex_unlock(&mdp5_data->dfps_lock);
3673
3674 return ret;
3675} /* dynamic_fps_sysfs_rda_dfps */
3676
3677static int calc_extra_blanking(struct mdss_panel_data *pdata, u32 new_fps)
3678{
3679 int add_porches, diff;
3680
3681 /* calculate extra: lines for vfp-method, pixels for hfp-method */
3682 diff = abs(pdata->panel_info.default_fps - new_fps);
3683 add_porches = mult_frac(pdata->panel_info.saved_total,
3684 diff, new_fps);
3685
3686 return add_porches;
3687}
3688
/*
 * Snapshot the panel's pre-dfps fps and porch values the first time a
 * dynamic-fps update occurs (default_fps == 0 marks "not yet cached"),
 * so later delta calculations keep full precision.
 */
static void cache_initial_timings(struct mdss_panel_data *pdata)
{
	if (!pdata->panel_info.default_fps) {

		/*
		 * This value will change dynamically once the
		 * actual dfps update happen in hw.
		 */
		pdata->panel_info.current_fps =
			mdss_panel_get_framerate(&pdata->panel_info,
				FPS_RESOLUTION_DEFAULT);

		/*
		 * Keep the initial fps and porch values for this panel before
		 * any dfps update happen, this is to prevent losing precision
		 * in further calculations.
		 */
		pdata->panel_info.default_fps =
			mdss_panel_get_framerate(&pdata->panel_info,
				FPS_RESOLUTION_DEFAULT);

		if (pdata->panel_info.dfps_update ==
			DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
			/* vfp method: cache vertical total and front porch */
			pdata->panel_info.saved_total =
				mdss_panel_get_vtotal(&pdata->panel_info);
			pdata->panel_info.saved_fporch =
				pdata->panel_info.lcdc.v_front_porch;

		} else if (pdata->panel_info.dfps_update ==
				DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP ||
				pdata->panel_info.dfps_update ==
				DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
				pdata->panel_info.dfps_update ==
				DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
			/* hfp methods: cache horizontal total and front porch */
			pdata->panel_info.saved_total =
				mdss_panel_get_htotal(&pdata->panel_info, true);
			pdata->panel_info.saved_fporch =
				pdata->panel_info.lcdc.h_front_porch;
		}
	}
}
3730
3731static inline void dfps_update_fps(struct mdss_panel_info *pinfo, u32 fps)
3732{
3733 if (pinfo->type == DTV_PANEL)
3734 pinfo->lcdc.frame_rate = fps;
3735 else
3736 pinfo->mipi.frame_rate = fps;
3737}
3738
/*
 * dfps_update_panel_params() - apply a dynamic-fps request to panel_info
 * @pdata: panel whose timing/clock fields are rewritten
 * @data: requested fps plus, for the multi-update modes, explicit hfp/hbp/
 *	  hpw and pixel clock values
 *
 * Dispatches on panel_info.dfps_update: stretch the vertical or horizontal
 * front porch, take porches and clock directly from @data, or just retune
 * the clock for the new rate.
 */
static void dfps_update_panel_params(struct mdss_panel_data *pdata,
	struct dynamic_fps_data *data)
{
	u32 new_fps = data->fps;

	/* Keep initial values before any dfps update */
	cache_initial_timings(pdata);

	if (pdata->panel_info.dfps_update ==
		DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) {
		int add_v_lines;

		/* calculate extra vfp lines */
		add_v_lines = calc_extra_blanking(pdata, new_fps);

		/* update panel info with new values */
		pdata->panel_info.lcdc.v_front_porch =
			pdata->panel_info.saved_fporch + add_v_lines;

		dfps_update_fps(&pdata->panel_info, new_fps);

		/* vfp change moves the prefetch window; recompute it */
		pdata->panel_info.prg_fet =
			mdss_mdp_get_prefetch_lines(&pdata->panel_info);

	} else if (pdata->panel_info.dfps_update ==
		DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP) {
		int add_h_pixels;

		/* calculate extra hfp pixels */
		add_h_pixels = calc_extra_blanking(pdata, new_fps);

		/* update panel info: hfp shrinks when fps rises, grows when it drops */
		if (pdata->panel_info.default_fps > new_fps)
			pdata->panel_info.lcdc.h_front_porch =
				pdata->panel_info.saved_fporch + add_h_pixels;
		else
			pdata->panel_info.lcdc.h_front_porch =
				pdata->panel_info.saved_fporch - add_h_pixels;

		dfps_update_fps(&pdata->panel_info, new_fps);
	} else if (pdata->panel_info.dfps_update ==
		DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP) {

		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
			data->hfp, data->hbp, data->hpw,
			data->clk_rate, data->fps);

		/* caller supplies porches and clock explicitly */
		pdata->panel_info.lcdc.h_front_porch = data->hfp;
		pdata->panel_info.lcdc.h_back_porch = data->hbp;
		pdata->panel_info.lcdc.h_pulse_width = data->hpw;

		pdata->panel_info.clk_rate = data->clk_rate;
		/* DTV clk_rate is given in kHz; convert to Hz */
		if (pdata->panel_info.type == DTV_PANEL)
			pdata->panel_info.clk_rate *= 1000;

		dfps_update_fps(&pdata->panel_info, new_fps);
	} else if (pdata->panel_info.dfps_update ==
		DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {

		pr_debug("hfp=%d, hbp=%d, hpw=%d, clk=%d, fps=%d\n",
			data->hfp, data->hbp, data->hpw,
			data->clk_rate, data->fps);

		/* explicit porches, but clock is recalculated for new_fps */
		pdata->panel_info.lcdc.h_front_porch = data->hfp;
		pdata->panel_info.lcdc.h_back_porch = data->hbp;
		pdata->panel_info.lcdc.h_pulse_width = data->hpw;

		pdata->panel_info.clk_rate = data->clk_rate;

		dfps_update_fps(&pdata->panel_info, new_fps);
		mdss_panel_update_clk_rate(&pdata->panel_info, new_fps);
	} else {
		/* default: only the rate and derived clock change */
		dfps_update_fps(&pdata->panel_info, new_fps);
		mdss_panel_update_clk_rate(&pdata->panel_info, new_fps);
	}
}
3815
/*
 * mdss_mdp_dfps_update_params() - validate and apply a dynamic-fps request
 * @mfd: framebuffer device
 * @pdata: primary panel (pdata->next, if any, is updated too)
 * @dfps_data: requested fps and optional porch/clock values; fps may be
 *	       clamped down to the panel maximum in place
 *
 * Returns 0 on success or -EINVAL when the request is below the panel
 * minimum.  Also refreshes the fbdev var timings so later screeninfo
 * queries see the new mode.
 */
int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
	struct mdss_panel_data *pdata, struct dynamic_fps_data *dfps_data)
{
	struct fb_var_screeninfo *var = &mfd->fbi->var;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	u32 dfps = dfps_data->fps;

	mutex_lock(&mdp5_data->dfps_lock);

	pr_debug("new_fps:%d\n", dfps);

	/* below minimum is rejected; above maximum is clamped */
	if (dfps < pdata->panel_info.min_fps) {
		pr_err("Unsupported FPS. min_fps = %d\n",
				pdata->panel_info.min_fps);
		mutex_unlock(&mdp5_data->dfps_lock);
		return -EINVAL;
	} else if (dfps > pdata->panel_info.max_fps) {
		pr_warn("Unsupported FPS. Configuring to max_fps = %d\n",
				pdata->panel_info.max_fps);
		dfps = pdata->panel_info.max_fps;
		dfps_data->fps = dfps;
	}

	dfps_update_panel_params(pdata, dfps_data);
	if (pdata->next)
		dfps_update_panel_params(pdata->next, dfps_data);

	/*
	 * Update the panel info in the upstream
	 * data, so any further call to get the screen
	 * info has the updated timings.
	 */
	mdss_panelinfo_to_fb_var(&pdata->panel_info, var);

	MDSS_XLOG(dfps);
	mutex_unlock(&mdp5_data->dfps_lock);

	return 0;
}
3855
3856
/*
 * sysfs write: set a new dynamic frame rate.
 *
 * Input format depends on the panel's dfps mode: the multi-update modes
 * take "hfp hbp hpw clk fps" (five unsigned values), every other mode a
 * single decimal fps.  Returns @count on success or a negative errno.
 */
static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int panel_fps, rc = 0;
	struct mdss_panel_data *pdata;
	struct fb_info *fbi = dev_get_drvdata(dev);
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct dynamic_fps_data data = {0};

	/* silently accept writes while the panel is off */
	if (!mdp5_data->ctl || !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
		pr_debug("panel is off\n");
		return count;
	}

	pdata = dev_get_platdata(&mfd->pdev->dev);
	if (!pdata) {
		pr_err("no panel connected for fb%d\n", mfd->index);
		return -ENODEV;
	}

	if (!pdata->panel_info.dynamic_fps) {
		pr_err_once("%s: Dynamic fps not enabled for this panel\n",
				__func__);
		return -EINVAL;
	}

	if (pdata->panel_info.dfps_update ==
		DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP ||
		pdata->panel_info.dfps_update ==
		DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) {
		if (sscanf(buf, "%u %u %u %u %u",
		    &data.hfp, &data.hbp, &data.hpw,
		    &data.clk_rate, &data.fps) != 5) {
			pr_err("could not read input\n");
			return -EINVAL;
		}
	} else {
		rc = kstrtoint(buf, 10, &data.fps);
		if (rc) {
			pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
			return rc;
		}
	}

	panel_fps = mdss_panel_get_framerate(&pdata->panel_info,
			FPS_RESOLUTION_DEFAULT);

	/* no-op when the requested rate already matches the panel */
	if (data.fps == panel_fps) {
		pr_debug("%s: FPS is already %d\n",
			__func__, data.fps);
		return count;
	}

	/*
	 * NOTE(review): bounds validation runs after the fps-unchanged
	 * early return above, so out-of-range porch values paired with an
	 * unchanged fps are accepted silently -- confirm intended.
	 */
	if (data.hfp > DFPS_DATA_MAX_HFP || data.hbp > DFPS_DATA_MAX_HBP ||
		data.hpw > DFPS_DATA_MAX_HPW || data.fps > DFPS_DATA_MAX_FPS ||
		data.clk_rate > DFPS_DATA_MAX_CLK_RATE){
		pr_err("Data values out of bound.\n");
		return -EINVAL;
	}

	rc = mdss_mdp_dfps_update_params(mfd, pdata, &data);
	if (rc) {
		pr_err("failed to set dfps params\n");
		return rc;
	}

	return count;
} /* dynamic_fps_sysfs_wta_dfps */
3926
3927
/* sysfs node "dynamic_fps" (rw): read current fps, write a new dfps request */
static DEVICE_ATTR(dynamic_fps, 0644, dynamic_fps_sysfs_rda_dfps,
	dynamic_fps_sysfs_wta_dfps);

/* attribute group registered on the fb device for dynamic fps control */
static struct attribute *dynamic_fps_fs_attrs[] = {
	&dev_attr_dynamic_fps.attr,
	NULL,
};
static struct attribute_group dynamic_fps_fs_attrs_group = {
	.attrs = dynamic_fps_fs_attrs,
};
3938
3939static ssize_t mdss_mdp_vsync_show_event(struct device *dev,
3940 struct device_attribute *attr, char *buf)
3941{
3942 struct fb_info *fbi = dev_get_drvdata(dev);
3943 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3944 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3945 u64 vsync_ticks;
3946 int ret;
3947
3948 if (!mdp5_data->ctl ||
3949 (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
3950 && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
3951 return -EAGAIN;
3952
3953 vsync_ticks = ktime_to_ns(mdp5_data->vsync_time);
3954
3955 pr_debug("fb%d vsync=%llu\n", mfd->index, vsync_ticks);
3956 ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
3957
3958 return ret;
3959}
3960
3961static ssize_t mdss_mdp_lineptr_show_event(struct device *dev,
3962 struct device_attribute *attr, char *buf)
3963{
3964 struct fb_info *fbi = dev_get_drvdata(dev);
3965 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3966 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3967 u64 lineptr_ticks;
3968 int ret;
3969
3970 if (!mdp5_data->ctl ||
3971 (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
3972 && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
3973 return -EPERM;
3974
3975 lineptr_ticks = ktime_to_ns(mdp5_data->lineptr_time);
3976
3977 pr_debug("fb%d lineptr=%llu\n", mfd->index, lineptr_ticks);
3978 ret = scnprintf(buf, PAGE_SIZE, "LINEPTR=%llu\n", lineptr_ticks);
3979
3980 return ret;
3981}
3982
3983static ssize_t mdss_mdp_lineptr_show_value(struct device *dev,
3984 struct device_attribute *attr, char *buf)
3985{
3986 struct fb_info *fbi = dev_get_drvdata(dev);
3987 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
3988 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
3989 int ret, lineptr_val;
3990
3991 if (!mdp5_data->ctl ||
3992 (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
3993 && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
3994 return -EPERM;
3995
3996 lineptr_val = mfd->panel_info->te.wr_ptr_irq;
3997
3998 ret = scnprintf(buf, PAGE_SIZE, "%d\n", lineptr_val);
3999
4000 return ret;
4001}
4002
4003static ssize_t mdss_mdp_lineptr_set_value(struct device *dev,
4004 struct device_attribute *attr, const char *buf, size_t count)
4005{
4006 struct fb_info *fbi = dev_get_drvdata(dev);
4007 struct msm_fb_data_type *mfd = fbi->par;
4008 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4009 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
4010 int ret, lineptr_value;
4011
4012 if (!ctl || (!ctl->panel_data->panel_info.cont_splash_enabled
4013 && !mdss_mdp_ctl_is_power_on(ctl)))
4014 return -EAGAIN;
4015
4016 ret = kstrtoint(buf, 10, &lineptr_value);
4017 if (ret || (lineptr_value < 0)
4018 || (lineptr_value > mfd->panel_info->yres)) {
4019 pr_err("Invalid input for lineptr\n");
4020 return -EINVAL;
4021 }
4022
4023 if (!mdss_mdp_is_lineptr_supported(ctl)) {
4024 pr_err("lineptr not supported\n");
4025 return -ENOTSUPP;
4026 }
4027
4028 mutex_lock(&mdp5_data->ov_lock);
4029 mfd->panel_info->te.wr_ptr_irq = lineptr_value;
4030 if (ctl && ctl->ops.update_lineptr)
4031 ctl->ops.update_lineptr(ctl, true);
4032 mutex_unlock(&mdp5_data->ov_lock);
4033
4034 return count;
4035}
4036
4037static ssize_t mdss_mdp_bl_show_event(struct device *dev,
4038 struct device_attribute *attr, char *buf)
4039{
4040 struct fb_info *fbi = dev_get_drvdata(dev);
4041 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4042 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4043 int ret;
4044
4045 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->bl_events);
4046 return ret;
4047}
4048
4049static ssize_t mdss_mdp_hist_show_event(struct device *dev,
4050 struct device_attribute *attr, char *buf)
4051{
4052 struct fb_info *fbi = dev_get_drvdata(dev);
4053 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4054 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4055 int ret;
4056
4057 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->hist_events);
4058 return ret;
4059}
4060
4061static ssize_t mdss_mdp_ad_show_event(struct device *dev,
4062 struct device_attribute *attr, char *buf)
4063{
4064 struct fb_info *fbi = dev_get_drvdata(dev);
4065 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4066 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4067 int ret;
4068
4069 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_events);
4070 return ret;
4071}
4072
4073static ssize_t mdss_mdp_ad_bl_show_event(struct device *dev,
4074 struct device_attribute *attr, char *buf)
4075{
4076 struct fb_info *fbi = dev_get_drvdata(dev);
4077 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4078 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4079 int ret;
4080
4081 ret = scnprintf(buf, PAGE_SIZE, "%d\n", mdp5_data->ad_bl_events);
4082 return ret;
4083}
4084
4085static inline int mdss_mdp_ad_is_supported(struct msm_fb_data_type *mfd)
4086{
4087 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
4088 struct mdss_mdp_mixer *mixer;
4089
4090 if (!ctl) {
4091 pr_debug("there is no ctl attached to fb\n");
4092 return 0;
4093 }
4094
4095 mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
4096 if (mixer && (mixer->num > ctl->mdata->nad_cfgs)) {
4097 if (!mixer)
4098 pr_warn("there is no mixer attached to fb\n");
4099 else
4100 pr_debug("mixer attached (%d) doesn't support ad\n",
4101 mixer->num);
4102 return 0;
4103 }
4104
4105 mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
4106 if (mixer && (mixer->num > ctl->mdata->nad_cfgs))
4107 return 0;
4108
4109 return 1;
4110}
4111
4112static ssize_t mdss_mdp_ad_show(struct device *dev,
4113 struct device_attribute *attr, char *buf)
4114{
4115 struct fb_info *fbi = dev_get_drvdata(dev);
4116 struct msm_fb_data_type *mfd = fbi->par;
4117 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4118 int ret, state;
4119
4120 state = mdss_mdp_ad_is_supported(mfd) ? mdp5_data->ad_state : -1;
4121
4122 ret = scnprintf(buf, PAGE_SIZE, "%d", state);
4123
4124 return ret;
4125}
4126
4127static ssize_t mdss_mdp_ad_store(struct device *dev,
4128 struct device_attribute *attr, const char *buf, size_t count)
4129{
4130 struct fb_info *fbi = dev_get_drvdata(dev);
4131 struct msm_fb_data_type *mfd = fbi->par;
4132 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4133 int ret, ad;
4134
4135 ret = kstrtoint(buf, 10, &ad);
4136 if (ret) {
4137 pr_err("Invalid input for ad\n");
4138 return -EINVAL;
4139 }
4140
4141 mdp5_data->ad_state = ad;
4142 sysfs_notify(&dev->kobj, NULL, "ad");
4143
4144 return count;
4145}
4146
4147static ssize_t mdss_mdp_dyn_pu_show(struct device *dev,
4148 struct device_attribute *attr, char *buf)
4149{
4150 struct fb_info *fbi = dev_get_drvdata(dev);
4151 struct msm_fb_data_type *mfd = fbi->par;
4152 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4153 int ret, state;
4154
4155 state = (mdp5_data->dyn_pu_state >= 0) ? mdp5_data->dyn_pu_state : -1;
4156
4157 ret = scnprintf(buf, PAGE_SIZE, "%d", state);
4158
4159 return ret;
4160}
4161
4162static ssize_t mdss_mdp_dyn_pu_store(struct device *dev,
4163 struct device_attribute *attr, const char *buf, size_t count)
4164{
4165 struct fb_info *fbi = dev_get_drvdata(dev);
4166 struct msm_fb_data_type *mfd = fbi->par;
4167 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4168 int ret, dyn_pu;
4169
4170 ret = kstrtoint(buf, 10, &dyn_pu);
4171 if (ret) {
4172 pr_err("Invalid input for partial update: ret = %d\n", ret);
4173 return ret;
4174 }
4175
4176 mdp5_data->dyn_pu_state = dyn_pu;
4177 sysfs_notify(&dev->kobj, NULL, "dyn_pu");
4178
4179 return count;
4180}
4181static ssize_t mdss_mdp_cmd_autorefresh_show(struct device *dev,
4182 struct device_attribute *attr, char *buf)
4183{
4184 ssize_t ret = 0;
4185 struct fb_info *fbi = dev_get_drvdata(dev);
4186 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4187 struct mdss_mdp_ctl *ctl;
4188
4189 if (!mfd) {
4190 pr_err("Invalid mfd structure\n");
4191 return -EINVAL;
4192 }
4193
4194 ctl = mfd_to_ctl(mfd);
4195 if (!ctl) {
4196 pr_err("Invalid ctl structure\n");
4197 return -EINVAL;
4198 }
4199
4200
4201 if (mfd->panel_info->type != MIPI_CMD_PANEL) {
4202 pr_err("Panel doesn't support autorefresh\n");
4203 ret = -EINVAL;
4204 } else {
4205 ret = snprintf(buf, PAGE_SIZE, "%d\n",
4206 mdss_mdp_ctl_cmd_get_autorefresh(ctl));
4207 }
4208 return ret;
4209}
4210
4211static ssize_t mdss_mdp_cmd_autorefresh_store(struct device *dev,
4212 struct device_attribute *attr, const char *buf, size_t len)
4213{
4214 int frame_cnt, rc;
4215 struct fb_info *fbi = dev_get_drvdata(dev);
4216 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4217 struct mdss_mdp_ctl *ctl;
4218
4219 if (!mfd) {
4220 pr_err("Invalid mfd structure\n");
4221 rc = -EINVAL;
4222 return rc;
4223 }
4224
4225 ctl = mfd_to_ctl(mfd);
4226 if (!ctl) {
4227 pr_err("Invalid ctl structure\n");
4228 rc = -EINVAL;
4229 return rc;
4230 }
4231
4232 if (mfd->panel_info->type != MIPI_CMD_PANEL) {
4233 pr_err("Panel doesn't support autorefresh\n");
4234 rc = -EINVAL;
4235 return rc;
4236 }
4237
4238 rc = kstrtoint(buf, 10, &frame_cnt);
4239 if (rc) {
4240 pr_err("kstrtoint failed. rc=%d\n", rc);
4241 return rc;
4242 }
4243
4244 rc = mdss_mdp_ctl_cmd_set_autorefresh(ctl, frame_cnt);
4245 if (rc) {
4246 pr_err("cmd_set_autorefresh failed, rc=%d, frame_cnt=%d\n",
4247 rc, frame_cnt);
4248 return rc;
4249 }
4250
4251 if (frame_cnt) {
4252 /* enable/reconfig autorefresh */
4253 mfd->mdp_sync_pt_data.threshold = 2;
4254 mfd->mdp_sync_pt_data.retire_threshold = 0;
4255 } else {
4256 /* disable autorefresh */
4257 mfd->mdp_sync_pt_data.threshold = 1;
4258 mfd->mdp_sync_pt_data.retire_threshold = 1;
4259 }
4260
4261 pr_debug("setting cmd autorefresh to cnt=%d\n", frame_cnt);
4262
4263 return len;
4264}
4265
4266
4267/* Print the last CRC Value read for batch mode */
4268static ssize_t mdss_mdp_misr_show(struct device *dev,
4269 struct device_attribute *attr, char *buf)
4270{
4271 ssize_t ret = 0;
4272 struct fb_info *fbi = dev_get_drvdata(dev);
4273 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4274 struct mdss_mdp_ctl *ctl;
4275
4276 if (!mfd) {
4277 pr_err("Invalid mfd structure\n");
4278 return -EINVAL;
4279 }
4280
4281 ctl = mfd_to_ctl(mfd);
4282 if (!ctl) {
4283 pr_err("Invalid ctl structure\n");
4284 return -EINVAL;
4285 }
4286
4287 ret = mdss_dump_misr_data(&buf, PAGE_SIZE);
4288
4289 return ret;
4290}
4291
4292/*
4293 * Enable crc batch mode. By enabling this mode through sysfs
4294 * driver will keep collecting the misr in ftrace during interrupts,
4295 * until disabled.
4296 */
4297static ssize_t mdss_mdp_misr_store(struct device *dev,
4298 struct device_attribute *attr, const char *buf, size_t len)
4299{
4300 int enable_misr, rc;
4301 struct fb_info *fbi = dev_get_drvdata(dev);
4302 struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
4303 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4304 struct mdss_mdp_ctl *ctl;
4305 struct mdp_misr req, sreq;
4306
4307 if (!mfd) {
4308 pr_err("Invalid mfd structure\n");
4309 rc = -EINVAL;
4310 return rc;
4311 }
4312
4313 ctl = mfd_to_ctl(mfd);
4314 if (!ctl) {
4315 pr_err("Invalid ctl structure\n");
4316 rc = -EINVAL;
4317 return rc;
4318 }
4319
4320 rc = kstrtoint(buf, 10, &enable_misr);
4321 if (rc) {
4322 pr_err("kstrtoint failed. rc=%d\n", rc);
4323 return rc;
4324 }
4325
4326 req.block_id = DISPLAY_MISR_MAX;
4327 sreq.block_id = DISPLAY_MISR_MAX;
4328
4329 pr_debug("intf_type:%d enable:%d\n", ctl->intf_type, enable_misr);
4330 if (ctl->intf_type == MDSS_INTF_DSI) {
4331
4332 req.block_id = DISPLAY_MISR_DSI0;
4333 req.crc_op_mode = MISR_OP_BM;
4334 req.frame_count = 1;
4335 if (is_panel_split(mfd)) {
4336
4337 sreq.block_id = DISPLAY_MISR_DSI1;
4338 sreq.crc_op_mode = MISR_OP_BM;
4339 sreq.frame_count = 1;
4340 }
4341 } else if (ctl->intf_type == MDSS_INTF_HDMI) {
4342
4343 req.block_id = DISPLAY_MISR_HDMI;
4344 req.crc_op_mode = MISR_OP_BM;
4345 req.frame_count = 1;
4346 } else {
4347 pr_err("misr not supported fo this fb:%d\n", mfd->index);
4348 rc = -ENODEV;
4349 return rc;
4350 }
4351
4352 if (enable_misr) {
4353 mdss_misr_set(mdata, &req, ctl);
4354
4355 if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
4356 mdss_misr_set(mdata, &sreq, ctl);
4357
4358 } else {
4359 mdss_misr_disable(mdata, &req, ctl);
4360
4361 if ((ctl->intf_type == MDSS_INTF_DSI) && is_panel_split(mfd))
4362 mdss_misr_disable(mdata, &sreq, ctl);
4363 }
4364
4365 pr_debug("misr %s\n", enable_misr ? "enabled" : "disabled");
4366
4367 return len;
4368}
4369
/* Per-fb sysfs attributes exposed by the overlay driver. */
static DEVICE_ATTR(msm_misr_en, 0644,
	mdss_mdp_misr_show, mdss_mdp_misr_store);
static DEVICE_ATTR(msm_cmd_autorefresh_en, 0644,
	mdss_mdp_cmd_autorefresh_show, mdss_mdp_cmd_autorefresh_store);
static DEVICE_ATTR(vsync_event, 0444, mdss_mdp_vsync_show_event, NULL);
static DEVICE_ATTR(lineptr_event, 0444, mdss_mdp_lineptr_show_event, NULL);
static DEVICE_ATTR(lineptr_value, 0664,
	mdss_mdp_lineptr_show_value, mdss_mdp_lineptr_set_value);
static DEVICE_ATTR(ad, 0664, mdss_mdp_ad_show,
	mdss_mdp_ad_store);
static DEVICE_ATTR(dyn_pu, 0664, mdss_mdp_dyn_pu_show,
	mdss_mdp_dyn_pu_store);
static DEVICE_ATTR(hist_event, 0444, mdss_mdp_hist_show_event, NULL);
static DEVICE_ATTR(bl_event, 0444, mdss_mdp_bl_show_event, NULL);
static DEVICE_ATTR(ad_event, 0444, mdss_mdp_ad_show_event, NULL);
static DEVICE_ATTR(ad_bl_event, 0444, mdss_mdp_ad_bl_show_event, NULL);

/* Attribute table registered as one sysfs group on the fb device. */
static struct attribute *mdp_overlay_sysfs_attrs[] = {
	&dev_attr_vsync_event.attr,
	&dev_attr_lineptr_event.attr,
	&dev_attr_lineptr_value.attr,
	&dev_attr_ad.attr,
	&dev_attr_dyn_pu.attr,
	&dev_attr_msm_misr_en.attr,
	&dev_attr_msm_cmd_autorefresh_en.attr,
	&dev_attr_hist_event.attr,
	&dev_attr_bl_event.attr,
	&dev_attr_ad_event.attr,
	&dev_attr_ad_bl_event.attr,
	NULL,
};

static struct attribute_group mdp_overlay_sysfs_group = {
	.attrs = mdp_overlay_sysfs_attrs,
};
4405
4406static void mdss_mdp_hw_cursor_setpos(struct mdss_mdp_mixer *mixer,
4407 struct mdss_rect *roi, u32 start_x, u32 start_y)
4408{
4409 int roi_xy = (roi->y << 16) | roi->x;
4410 int start_xy = (start_y << 16) | start_x;
4411 int roi_size = (roi->h << 16) | roi->w;
4412
4413 if (!mixer) {
4414 pr_err("mixer not available\n");
4415 return;
4416 }
4417 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_XY, roi_xy);
4418 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_START_XY, start_xy);
4419 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
4420}
4421
/*
 * Program the layer-mixer cursor image registers: image/ROI size, stride,
 * DMA base address and blend configuration (constant alpha and background
 * color transparency match).  Register write order follows the existing
 * sequence; do not reorder without hardware validation.
 */
static void mdss_mdp_hw_cursor_setimage(struct mdss_mdp_mixer *mixer,
	struct fb_cursor *cursor, u32 cursor_addr, struct mdss_rect *roi)
{
	int calpha_en, transp_en, alpha, size;
	struct fb_image *img = &cursor->image;
	u32 blendcfg;
	int roi_size = 0;

	if (!mixer) {
		pr_err("mixer not available\n");
		return;
	}

	/* all-ones background means "no transparent color" */
	if (img->bg_color == 0xffffffff)
		transp_en = 0;
	else
		transp_en = 1;

	alpha = (img->fg_color & 0xff000000) >> 24;

	/* pick constant-alpha mode based on whether fg_color carries alpha */
	if (alpha)
		calpha_en = 0x0; /* xrgb */
	else
		calpha_en = 0x2; /* argb */

	/* sizes are packed as (height << 16) | width */
	roi_size = (roi->h << 16) | roi->w;
	size = (img->height << 16) | img->width;
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_STRIDE,
				img->width * 4);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
				cursor_addr);
	/* update blend mode bits without touching the enable bit (bit 0) */
	blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
	blendcfg &= ~0x1;
	blendcfg |= (transp_en << 3) | (calpha_en << 1);
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
				blendcfg);
	if (calpha_en)
		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM,
				alpha);

	if (transp_en) {
		/* program the low/high match range for the transparent color */
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0,
				((img->bg_color & 0xff00) << 8) |
				(img->bg_color & 0xff));
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1,
				((img->bg_color & 0xff0000) >> 16));
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0,
				((img->bg_color & 0xff00) << 8) |
				(img->bg_color & 0xff));
		mdp_mixer_write(mixer,
				MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1,
				((img->bg_color & 0xff0000) >> 16));
	}
}
4481
4482static void mdss_mdp_hw_cursor_blend_config(struct mdss_mdp_mixer *mixer,
4483 struct fb_cursor *cursor)
4484{
4485 u32 blendcfg;
4486
4487 if (!mixer) {
4488 pr_err("mixer not availbale\n");
4489 return;
4490 }
4491
4492 blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
4493 if (!cursor->enable != !(blendcfg & 0x1)) {
4494 if (cursor->enable) {
4495 pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
4496 blendcfg |= 0x1;
4497 } else {
4498 pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
4499 blendcfg &= ~0x1;
4500 }
4501
4502 mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
4503 blendcfg);
4504 mixer->cursor_enabled = cursor->enable;
4505 mixer->params_changed++;
4506 }
4507
4508}
4509
4510static void mdss_mdp_set_rect(struct mdp_rect *rect, u16 x, u16 y, u16 w,
4511 u16 h)
4512{
4513 rect->x = x;
4514 rect->y = y;
4515 rect->w = w;
4516 rect->h = h;
4517}
4518
4519static void mdss_mdp_curor_pipe_cleanup(struct msm_fb_data_type *mfd,
4520 int cursor_pipe)
4521{
4522 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4523
4524 if (mdp5_data->cursor_ndx[cursor_pipe] != MSMFB_NEW_REQUEST) {
4525 mdss_mdp_overlay_release(mfd,
4526 mdp5_data->cursor_ndx[cursor_pipe]);
4527 mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
4528 }
4529}
4530
4531int mdss_mdp_cursor_flush(struct msm_fb_data_type *mfd,
4532 struct mdss_mdp_pipe *pipe, int cursor_pipe)
4533{
4534 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4535 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
4536 struct mdss_mdp_ctl *sctl = NULL;
4537 u32 flush_bits = BIT(22 + pipe->num - MDSS_MDP_SSPP_CURSOR0);
4538
4539 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
4540
4541 mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
4542 MDSS_XLOG(ctl->intf_num, flush_bits);
4543 if ((!ctl->split_flush_en) && pipe->mixer_right) {
4544 sctl = mdss_mdp_get_split_ctl(ctl);
4545 if (!sctl) {
4546 pr_err("not able to get the other ctl\n");
4547 return -ENODEV;
4548 }
4549 mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
4550 MDSS_XLOG(sctl->intf_num, flush_bits);
4551 }
4552
4553 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
4554
4555 return 0;
4556}
4557
4558static int mdss_mdp_cursor_pipe_setup(struct msm_fb_data_type *mfd,
4559 struct mdp_overlay *req, int cursor_pipe) {
4560 struct mdss_mdp_pipe *pipe;
4561 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4562 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4563 int ret = 0;
4564 u32 cursor_addr;
4565 struct mdss_mdp_data *buf = NULL;
4566
4567 req->id = mdp5_data->cursor_ndx[cursor_pipe];
4568 ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
4569 if (ret) {
4570 pr_err("cursor pipe setup failed, cursor_pipe:%d, ret:%d\n",
4571 cursor_pipe, ret);
4572 mdp5_data->cursor_ndx[cursor_pipe] = MSMFB_NEW_REQUEST;
4573 return ret;
4574 }
4575
4576 pr_debug("req id:%d cursor_pipe:%d pnum:%d\n",
4577 req->id, cursor_pipe, pipe->ndx);
4578
4579 if (mdata->mdss_util->iommu_attached()) {
4580 cursor_addr = mfd->cursor_buf_iova;
4581 } else {
4582 if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
4583 pr_err("can't access phy mem >4GB w/o iommu\n");
4584 ret = -ERANGE;
4585 goto done;
4586 }
4587 cursor_addr = mfd->cursor_buf_phys;
4588 }
4589
4590 buf = __mdp_overlay_buf_alloc(mfd, pipe);
4591 if (!buf) {
4592 pr_err("unable to allocate memory for cursor buffer\n");
4593 ret = -ENOMEM;
4594 goto done;
4595 }
4596 mdp5_data->cursor_ndx[cursor_pipe] = pipe->ndx;
4597 buf->p[0].addr = cursor_addr;
4598 buf->p[0].len = mdss_mdp_get_cursor_frame_size(mdata);
4599 buf->num_planes = 1;
4600
4601 buf->state = MDP_BUF_STATE_ACTIVE;
4602 if (!(req->flags & MDP_SOLID_FILL))
4603 ret = mdss_mdp_pipe_queue_data(pipe, buf);
4604 else
4605 ret = mdss_mdp_pipe_queue_data(pipe, NULL);
4606
4607 if (ret) {
4608 pr_err("cursor pipe queue data failed in async mode\n");
4609 return ret;
4610 }
4611
4612 ret = mdss_mdp_cursor_flush(mfd, pipe, cursor_pipe);
4613done:
4614 if (ret && mdp5_data->cursor_ndx[cursor_pipe] == MSMFB_NEW_REQUEST)
4615 mdss_mdp_overlay_release(mfd, pipe->ndx);
4616
4617 return ret;
4618}
4619
4620static int mdss_mdp_hw_cursor_pipe_update(struct msm_fb_data_type *mfd,
4621 struct fb_cursor *cursor)
4622{
4623 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4624 struct mdss_mdp_mixer *mixer;
4625 struct fb_image *img = &cursor->image;
4626 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4627 struct mdp_overlay *req = NULL;
4628 struct mdss_rect roi;
4629 int ret = 0;
4630 struct fb_var_screeninfo *var = &mfd->fbi->var;
4631 u32 xres = var->xres;
4632 u32 yres = var->yres;
4633 u32 start_x = img->dx;
4634 u32 start_y = img->dy;
4635 u32 left_lm_w = left_lm_w_from_mfd(mfd);
4636 struct platform_device *pdev = mfd->pdev;
4637 u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);
4638
4639 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
4640 if (ret)
4641 return ret;
4642
4643 if (mdss_fb_is_power_off(mfd)) {
4644 ret = -EPERM;
4645 goto done;
4646 }
4647
4648 if (!cursor->enable) {
4649 mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
4650 mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
4651 goto done;
4652 }
4653
4654 mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
4655 if (!mixer) {
4656 ret = -ENODEV;
4657 goto done;
4658 }
4659
4660 if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
4661 ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
4662 cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
4663 &mfd->cursor_buf_iova, &mfd->cursor_buf,
4664 GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
4665 if (ret) {
4666 pr_err("can't allocate cursor buffer rc:%d\n", ret);
4667 goto done;
4668 }
4669
4670 mixer->cursor_hotx = 0;
4671 mixer->cursor_hoty = 0;
4672 }
4673
4674 pr_debug("mixer=%d enable=%x set=%x\n", mixer->num, cursor->enable,
4675 cursor->set);
4676
4677 if (cursor->set & FB_CUR_SETHOT) {
4678 if ((cursor->hot.x < img->width) &&
4679 (cursor->hot.y < img->height)) {
4680 mixer->cursor_hotx = cursor->hot.x;
4681 mixer->cursor_hoty = cursor->hot.y;
4682 /* Update cursor position */
4683 cursor->set |= FB_CUR_SETPOS;
4684 } else {
4685 pr_err("Invalid cursor hotspot coordinates\n");
4686 ret = -EINVAL;
4687 goto done;
4688 }
4689 }
4690
4691 memset(&roi, 0, sizeof(struct mdss_rect));
4692 if (start_x > mixer->cursor_hotx) {
4693 start_x -= mixer->cursor_hotx;
4694 } else {
4695 roi.x = mixer->cursor_hotx - start_x;
4696 start_x = 0;
4697 }
4698 if (start_y > mixer->cursor_hoty) {
4699 start_y -= mixer->cursor_hoty;
4700 } else {
4701 roi.y = mixer->cursor_hoty - start_y;
4702 start_y = 0;
4703 }
4704
4705 if ((img->width > mdata->max_cursor_size) ||
4706 (img->height > mdata->max_cursor_size) ||
4707 (img->depth != 32) || (start_x >= xres) ||
4708 (start_y >= yres)) {
4709 pr_err("Invalid cursor image coordinates\n");
4710 ret = -EINVAL;
4711 goto done;
4712 }
4713
4714 roi.w = min(xres - start_x, img->width - roi.x);
4715 roi.h = min(yres - start_y, img->height - roi.y);
4716
4717 if ((roi.w > mdata->max_cursor_size) ||
4718 (roi.h > mdata->max_cursor_size)) {
4719 pr_err("Invalid cursor ROI size\n");
4720 ret = -EINVAL;
4721 goto done;
4722 }
4723
4724 req = kcalloc(1, sizeof(struct mdp_overlay), GFP_KERNEL);
4725 if (!req) {
4726 ret = -ENOMEM;
4727 goto done;
4728 }
4729
4730 req->pipe_type = PIPE_TYPE_CURSOR;
4731 req->z_order = HW_CURSOR_STAGE(mdata);
4732
4733 req->src.width = img->width;
4734 req->src.height = img->height;
4735 req->src.format = mfd->fb_imgType;
4736
4737 mdss_mdp_set_rect(&req->src_rect, roi.x, roi.y, roi.w, roi.h);
4738 mdss_mdp_set_rect(&req->dst_rect, start_x, start_y, roi.w, roi.h);
4739
4740 req->bg_color = img->bg_color;
4741 req->alpha = (img->fg_color >> ((32 - var->transp.offset) - 8)) & 0xff;
4742 if (req->alpha)
4743 req->blend_op = BLEND_OP_PREMULTIPLIED;
4744 else
4745 req->blend_op = BLEND_OP_COVERAGE;
4746 req->transp_mask = img->bg_color & ~(0xff << var->transp.offset);
4747
4748 if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
4749 ret = copy_from_user(mfd->cursor_buf, img->data,
4750 img->width * img->height * 4);
4751 if (ret) {
4752 pr_err("copy_from_user error. rc=%d\n", ret);
4753 goto done;
4754 }
4755
4756 mixer->cursor_hotx = 0;
4757 mixer->cursor_hoty = 0;
4758 }
4759
4760 /*
4761 * When source split is enabled, only CURSOR_PIPE_LEFT is used,
4762 * with both mixers of the pipe staged all the time.
4763 * When source split is disabled, 2 pipes are staged, with one
4764 * pipe containing the actual data and another one a transparent
4765 * solid fill when the data falls only in left or right dsi.
4766 * Both are done to support async cursor functionality.
4767 */
4768 if (mdata->has_src_split || (!is_split_lm(mfd))
4769 || (mdata->ncursor_pipes == 1)) {
4770 ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
4771 } else if ((start_x + roi.w) <= left_lm_w) {
4772 ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
4773 if (ret)
4774 goto done;
4775 req->bg_color = 0;
4776 req->flags |= MDP_SOLID_FILL;
4777 req->dst_rect.x = left_lm_w;
4778 ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
4779 } else if (start_x >= left_lm_w) {
4780 ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
4781 if (ret)
4782 goto done;
4783 req->bg_color = 0;
4784 req->flags |= MDP_SOLID_FILL;
4785 req->dst_rect.x = 0;
4786 ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
4787 } else if ((start_x <= left_lm_w) && ((start_x + roi.w) >= left_lm_w)) {
4788 mdss_mdp_set_rect(&req->dst_rect, start_x, start_y,
4789 (left_lm_w - start_x), roi.h);
4790 mdss_mdp_set_rect(&req->src_rect, 0, 0, (left_lm_w -
4791 start_x), roi.h);
4792 ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_LEFT);
4793 if (ret)
4794 goto done;
4795
4796 mdss_mdp_set_rect(&req->dst_rect, left_lm_w, start_y, ((start_x
4797 + roi.w) - left_lm_w), roi.h);
4798 mdss_mdp_set_rect(&req->src_rect, (left_lm_w - start_x), 0,
4799 (roi.w - (left_lm_w - start_x)), roi.h);
4800 ret = mdss_mdp_cursor_pipe_setup(mfd, req, CURSOR_PIPE_RIGHT);
4801 } else {
4802 pr_err("Invalid case for cursor pipe setup\n");
4803 ret = -EINVAL;
4804 }
4805
4806done:
4807 if (ret) {
4808 mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_LEFT);
4809 mdss_mdp_curor_pipe_cleanup(mfd, CURSOR_PIPE_RIGHT);
4810 }
4811
4812 kfree(req);
4813 mutex_unlock(&mdp5_data->ov_lock);
4814 return ret;
4815}
4816
4817static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
4818 struct fb_cursor *cursor)
4819{
4820 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
4821 struct mdss_mdp_mixer *mixer_left = NULL;
4822 struct mdss_mdp_mixer *mixer_right = NULL;
4823 struct fb_image *img = &cursor->image;
4824 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4825 struct fbcurpos cursor_hot;
4826 struct mdss_rect roi;
4827 int ret = 0;
4828 u32 xres = mfd->fbi->var.xres;
4829 u32 yres = mfd->fbi->var.yres;
4830 u32 start_x = img->dx;
4831 u32 start_y = img->dy;
4832 u32 left_lm_w = left_lm_w_from_mfd(mfd);
4833 struct platform_device *pdev = mfd->pdev;
4834 u32 cursor_frame_size = mdss_mdp_get_cursor_frame_size(mdata);
4835
4836 mixer_left = mdss_mdp_mixer_get(mdp5_data->ctl,
4837 MDSS_MDP_MIXER_MUX_DEFAULT);
4838 if (!mixer_left)
4839 return -ENODEV;
4840 if (is_split_lm(mfd)) {
4841 mixer_right = mdss_mdp_mixer_get(mdp5_data->ctl,
4842 MDSS_MDP_MIXER_MUX_RIGHT);
4843 if (!mixer_right)
4844 return -ENODEV;
4845 }
4846
4847 if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
4848 ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
4849 cursor_frame_size, (dma_addr_t *) &mfd->cursor_buf_phys,
4850 &mfd->cursor_buf_iova, &mfd->cursor_buf,
4851 GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
4852 if (ret) {
4853 pr_err("can't allocate cursor buffer rc:%d\n", ret);
4854 return ret;
4855 }
4856 }
4857
4858 if ((img->width > mdata->max_cursor_size) ||
4859 (img->height > mdata->max_cursor_size) ||
4860 (img->depth != 32) || (start_x >= xres) || (start_y >= yres))
4861 return -EINVAL;
4862
4863 pr_debug("enable=%x set=%x\n", cursor->enable, cursor->set);
4864
4865 memset(&cursor_hot, 0, sizeof(struct fbcurpos));
4866 memset(&roi, 0, sizeof(struct mdss_rect));
4867 if (cursor->set & FB_CUR_SETHOT) {
4868 if ((cursor->hot.x < img->width) &&
4869 (cursor->hot.y < img->height)) {
4870 cursor_hot.x = cursor->hot.x;
4871 cursor_hot.y = cursor->hot.y;
4872 /* Update cursor position */
4873 cursor->set |= FB_CUR_SETPOS;
4874 } else {
4875 pr_err("Invalid cursor hotspot coordinates\n");
4876 return -EINVAL;
4877 }
4878 }
4879
4880 if (start_x > cursor_hot.x) {
4881 start_x -= cursor_hot.x;
4882 } else {
4883 roi.x = cursor_hot.x - start_x;
4884 start_x = 0;
4885 }
4886 if (start_y > cursor_hot.y) {
4887 start_y -= cursor_hot.y;
4888 } else {
4889 roi.y = cursor_hot.y - start_y;
4890 start_y = 0;
4891 }
4892
4893 roi.w = min(xres - start_x, img->width - roi.x);
4894 roi.h = min(yres - start_y, img->height - roi.y);
4895
4896 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
4897
4898 if (mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
4899 u32 cursor_addr;
4900
4901 ret = copy_from_user(mfd->cursor_buf, img->data,
4902 img->width * img->height * 4);
4903 if (ret) {
4904 pr_err("copy_from_user error. rc=%d\n", ret);
4905 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
4906 return ret;
4907 }
4908
4909 if (mdata->mdss_util->iommu_attached()) {
4910 cursor_addr = mfd->cursor_buf_iova;
4911 } else {
4912 if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
4913 pr_err("can't access phy mem >4GB w/o iommu\n");
4914 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
4915 return -ERANGE;
4916 }
4917 cursor_addr = mfd->cursor_buf_phys;
4918 }
4919 mdss_mdp_hw_cursor_setimage(mixer_left, cursor, cursor_addr,
4920 &roi);
4921 if (is_split_lm(mfd))
4922 mdss_mdp_hw_cursor_setimage(mixer_right, cursor,
4923 cursor_addr, &roi);
4924 }
4925
4926 if ((start_x + roi.w) <= left_lm_w) {
4927 if (cursor->set & FB_CUR_SETPOS)
4928 mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
4929 start_y);
4930 mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
4931 cursor->enable = false;
4932 mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
4933 } else if (start_x >= left_lm_w) {
4934 start_x -= left_lm_w;
4935 if (cursor->set & FB_CUR_SETPOS)
4936 mdss_mdp_hw_cursor_setpos(mixer_right, &roi, start_x,
4937 start_y);
4938 mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
4939 cursor->enable = false;
4940 mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
4941 } else {
4942 struct mdss_rect roi_right = roi;
4943
4944 roi.w = left_lm_w - start_x;
4945 if (cursor->set & FB_CUR_SETPOS)
4946 mdss_mdp_hw_cursor_setpos(mixer_left, &roi, start_x,
4947 start_y);
4948 mdss_mdp_hw_cursor_blend_config(mixer_left, cursor);
4949
4950 roi_right.x = 0;
4951 roi_right.w = (start_x + roi_right.w) - left_lm_w;
4952 start_x = 0;
4953 if (cursor->set & FB_CUR_SETPOS)
4954 mdss_mdp_hw_cursor_setpos(mixer_right, &roi_right,
4955 start_x, start_y);
4956 mdss_mdp_hw_cursor_blend_config(mixer_right, cursor);
4957 }
4958
4959 mixer_left->ctl->flush_bits |= BIT(6) << mixer_left->num;
4960 if (is_split_lm(mfd))
4961 mixer_right->ctl->flush_bits |= BIT(6) << mixer_right->num;
4962 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
4963 return 0;
4964}
4965
4966static int mdss_bl_scale_config(struct msm_fb_data_type *mfd,
4967 struct mdp_bl_scale_data *data)
4968{
4969 int ret = 0;
4970 int curr_bl;
4971
4972 mutex_lock(&mfd->bl_lock);
4973 curr_bl = mfd->bl_level;
4974 mfd->bl_scale = data->scale;
4975 mfd->bl_min_lvl = data->min_lvl;
4976 pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
4977 mfd->bl_min_lvl);
4978
4979 /* Update current backlight to use new scaling, if it is not zero */
4980 if (curr_bl)
4981 mdss_fb_set_backlight(mfd, curr_bl);
4982
4983 mutex_unlock(&mfd->bl_lock);
4984 return ret;
4985}
4986
4987static int mdss_mdp_pp_ioctl(struct msm_fb_data_type *mfd,
4988 void __user *argp)
4989{
4990 int ret;
4991 struct msmfb_mdp_pp mdp_pp;
4992 u32 copyback = 0;
4993 u32 copy_from_kernel = 0;
4994
4995 ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
4996 if (ret)
4997 return ret;
4998
4999 /* Supprt only MDP register read/write and
5000 * exit_dcm in DCM state
5001 */
5002 if (mfd->dcm_state == DCM_ENTER &&
5003 (mdp_pp.op != mdp_op_calib_buffer &&
5004 mdp_pp.op != mdp_op_calib_dcm_state))
5005 return -EPERM;
5006
5007 switch (mdp_pp.op) {
5008 case mdp_op_pa_cfg:
5009 ret = mdss_mdp_pa_config(mfd, &mdp_pp.data.pa_cfg_data,
5010 &copyback);
5011 break;
5012
5013 case mdp_op_pa_v2_cfg:
5014 ret = mdss_mdp_pa_v2_config(mfd, &mdp_pp.data.pa_v2_cfg_data,
5015 &copyback);
5016 break;
5017
5018 case mdp_op_pcc_cfg:
5019 ret = mdss_mdp_pcc_config(mfd, &mdp_pp.data.pcc_cfg_data,
5020 &copyback);
5021 break;
5022
5023 case mdp_op_lut_cfg:
5024 switch (mdp_pp.data.lut_cfg_data.lut_type) {
5025 case mdp_lut_igc:
5026 ret = mdss_mdp_igc_lut_config(mfd,
5027 (struct mdp_igc_lut_data *)
5028 &mdp_pp.data.lut_cfg_data.data,
5029 &copyback, copy_from_kernel);
5030 break;
5031
5032 case mdp_lut_pgc:
5033 ret = mdss_mdp_argc_config(mfd,
5034 &mdp_pp.data.lut_cfg_data.data.pgc_lut_data,
5035 &copyback);
5036 break;
5037
5038 case mdp_lut_hist:
5039 ret = mdss_mdp_hist_lut_config(mfd,
5040 (struct mdp_hist_lut_data *)
5041 &mdp_pp.data.lut_cfg_data.data, &copyback);
5042 break;
5043
5044 default:
5045 ret = -ENOTSUPP;
5046 break;
5047 }
5048 break;
5049 case mdp_op_dither_cfg:
5050 ret = mdss_mdp_dither_config(mfd,
5051 &mdp_pp.data.dither_cfg_data,
5052 &copyback,
5053 false);
5054 break;
5055 case mdp_op_gamut_cfg:
5056 ret = mdss_mdp_gamut_config(mfd,
5057 &mdp_pp.data.gamut_cfg_data,
5058 &copyback);
5059 break;
5060 case mdp_bl_scale_cfg:
5061 ret = mdss_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
5062 &mdp_pp.data.bl_scale_data);
5063 break;
5064 case mdp_op_ad_cfg:
5065 ret = mdss_mdp_ad_config(mfd, &mdp_pp.data.ad_init_cfg);
5066 break;
5067 case mdp_op_ad_input:
5068 ret = mdss_mdp_ad_input(mfd, &mdp_pp.data.ad_input, 1);
5069 if (ret > 0) {
5070 ret = 0;
5071 copyback = 1;
5072 }
5073 break;
5074 case mdp_op_calib_cfg:
5075 ret = mdss_mdp_calib_config((struct mdp_calib_config_data *)
5076 &mdp_pp.data.calib_cfg, &copyback);
5077 break;
5078 case mdp_op_calib_mode:
5079 ret = mdss_mdp_calib_mode(mfd, &mdp_pp.data.mdss_calib_cfg);
5080 break;
5081 case mdp_op_calib_buffer:
5082 ret = mdss_mdp_calib_config_buffer(
5083 (struct mdp_calib_config_buffer *)
5084 &mdp_pp.data.calib_buffer, &copyback);
5085 break;
5086 case mdp_op_calib_dcm_state:
5087 ret = mdss_fb_dcm(mfd, mdp_pp.data.calib_dcm.dcm_state);
5088 break;
5089 default:
5090 pr_err("Unsupported request to MDP_PP IOCTL. %d = op\n",
5091 mdp_pp.op);
5092 ret = -EINVAL;
5093 break;
5094 }
5095 if ((ret == 0) && copyback)
5096 ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp));
5097 return ret;
5098}
5099
5100static int mdss_mdp_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
5101 void __user *argp)
5102{
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305103 int ret = -ENOTSUPP;
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305104 struct mdp_histogram_data hist;
5105 struct mdp_histogram_start_req hist_req;
5106 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
5107 u32 block;
5108
5109 if (!mdata)
5110 return -EPERM;
5111
5112 switch (cmd) {
5113 case MSMFB_HISTOGRAM_START:
5114 if (mdss_fb_is_power_off(mfd))
5115 return -EPERM;
5116
5117 ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
5118 if (ret)
5119 return ret;
5120
5121 ret = mdss_mdp_hist_start(&hist_req);
5122 break;
5123
5124 case MSMFB_HISTOGRAM_STOP:
5125 ret = copy_from_user(&block, argp, sizeof(int));
5126 if (ret)
5127 return ret;
5128
5129 ret = mdss_mdp_hist_stop(block);
5130 if (ret)
5131 return ret;
5132 break;
5133
5134 case MSMFB_HISTOGRAM:
5135 if (mdss_fb_is_power_off(mfd)) {
5136 pr_err("mfd is turned off MSMFB_HISTOGRAM failed\n");
5137 return -EPERM;
5138 }
5139
5140 ret = copy_from_user(&hist, argp, sizeof(hist));
5141 if (ret)
5142 return ret;
5143
5144 ret = mdss_mdp_hist_collect(&hist);
5145 if (!ret)
5146 ret = copy_to_user(argp, &hist, sizeof(hist));
5147 break;
5148 default:
5149 break;
5150 }
5151 return ret;
5152}
5153
5154static int mdss_fb_set_metadata(struct msm_fb_data_type *mfd,
5155 struct msmfb_metadata *metadata)
5156{
5157 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
5158 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
5159 int ret = 0;
5160
5161 if (!ctl)
5162 return -EPERM;
5163 switch (metadata->op) {
5164 case metadata_op_vic:
5165 if (mfd->panel_info)
5166 mfd->panel_info->vic =
5167 metadata->data.video_info_code;
5168 else
5169 ret = -EINVAL;
5170 break;
5171 case metadata_op_crc:
5172 if (mdss_fb_is_power_off(mfd))
5173 return -EPERM;
5174 ret = mdss_misr_set(mdata, &metadata->data.misr_request, ctl);
5175 break;
5176 default:
5177 pr_warn("unsupported request to MDP META IOCTL\n");
5178 ret = -EINVAL;
5179 break;
5180 }
5181 return ret;
5182}
5183
5184static int mdss_fb_get_hw_caps(struct msm_fb_data_type *mfd,
5185 struct mdss_hw_caps *caps)
5186{
5187 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
5188
5189 caps->mdp_rev = mdata->mdp_rev;
5190 caps->vig_pipes = mdata->nvig_pipes;
5191 caps->rgb_pipes = mdata->nrgb_pipes;
5192 caps->dma_pipes = mdata->ndma_pipes;
5193 if (mdata->has_bwc)
5194 caps->features |= MDP_BWC_EN;
5195 if (mdata->has_decimation)
5196 caps->features |= MDP_DECIMATION_EN;
5197
5198 if (mdata->smp_mb_cnt) {
5199 caps->max_smp_cnt = mdata->smp_mb_cnt;
5200 caps->smp_per_pipe = mdata->smp_mb_per_pipe;
5201 }
5202
5203 return 0;
5204}
5205
/*
 * Service read-only metadata queries: panel frame rate, hardware caps,
 * a shareable fd for the framebuffer ion memory, and MISR (CRC) readback.
 */
static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd,
				struct msmfb_metadata *metadata)
{
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = NULL;
	int ret = 0;

	switch (metadata->op) {
	case metadata_op_frame_rate:
		metadata->data.panel_frame_rate =
			mdss_panel_get_framerate(mfd->panel_info,
				FPS_RESOLUTION_DEFAULT);
		pr_debug("current fps:%d\n", metadata->data.panel_frame_rate);
		break;
	case metadata_op_get_caps:
		ret = mdss_fb_get_hw_caps(mfd, &metadata->data.caps);
		break;
	case metadata_op_get_ion_fd:
		if (mfd->fb_ion_handle && mfd->fb_ion_client) {
			/*
			 * Take a dma-buf reference before handing out an fd;
			 * drop it again if fd allocation fails so the
			 * refcount stays balanced.
			 */
			get_dma_buf(mfd->fbmem_buf);
			metadata->data.fbmem_ionfd =
				ion_share_dma_buf_fd(mfd->fb_ion_client,
					mfd->fb_ion_handle);
			if (metadata->data.fbmem_ionfd < 0) {
				dma_buf_put(mfd->fbmem_buf);
				pr_err("fd allocation failed. fd = %d\n",
						metadata->data.fbmem_ionfd);
			}
		}
		break;
	case metadata_op_crc:
		/* MISR readback requires an active ctl and a powered panel */
		ctl = mfd_to_ctl(mfd);
		if (!ctl || mdss_fb_is_power_off(mfd))
			return -EPERM;
		ret = mdss_misr_get(mdata, &metadata->data.misr_request, ctl,
			ctl->is_video_mode);
		break;
	default:
		pr_warn("Unsupported request to MDP META IOCTL.\n");
		ret = -EINVAL;
		break;
	}
	return ret;
}
5250
5251static int __mdss_mdp_clean_dirty_pipes(struct msm_fb_data_type *mfd)
5252{
5253 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
5254 struct mdss_mdp_pipe *pipe;
5255 int unset_ndx = 0;
5256
5257 mutex_lock(&mdp5_data->list_lock);
5258 list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
5259 if (pipe->dirty)
5260 unset_ndx |= pipe->ndx;
5261 }
5262 mutex_unlock(&mdp5_data->list_lock);
5263 if (unset_ndx)
5264 mdss_mdp_overlay_release(mfd, unset_ndx);
5265
5266 return unset_ndx;
5267}
5268
/*
 * Validate framebuffer state ahead of a commit: release pipes that user
 * space lost track of (left dirty) and reject commits issued mid
 * mode-switch unless no pipes are staged (border color only).
 *
 * Returns 0 on success, -EPIPE when dirty pipes had to be cleaned,
 * -EINVAL for an invalid commit during mode switch, or the error from
 * the interruptible lock.
 */
static int mdss_mdp_overlay_precommit(struct msm_fb_data_type *mfd)
{
	struct mdss_overlay_private *mdp5_data;
	int ret;

	if (!mfd)
		return -ENODEV;

	mdp5_data = mfd_to_mdp5_data(mfd);
	if (!mdp5_data)
		return -ENODEV;

	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
	if (ret)
		return ret;

	/*
	 * we can assume that any pipes that are still dirty at this point are
	 * not properly tracked by user land. This could be for any reason,
	 * mark them for cleanup at this point.
	 */
	ret = __mdss_mdp_clean_dirty_pipes(mfd);
	if (ret) {
		pr_warn("fb%d: dirty pipes remaining %x\n",
			mfd->index, ret);
		ret = -EPIPE;
	}

	/*
	 * If we are in process of mode switch we may have an invalid state.
	 * We can allow commit to happen if there are no pipes attached as only
	 * border color will be seen regardless of resolution or mode.
	 */
	if ((mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) &&
			(mfd->switch_state != MDSS_MDP_WAIT_FOR_COMMIT)) {
		if (list_empty(&mdp5_data->pipes_used)) {
			mfd->switch_state = MDSS_MDP_WAIT_FOR_COMMIT;
		} else {
			pr_warn("Invalid commit on fb%d with state=%d\n",
				mfd->index, mfd->switch_state);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&mdp5_data->ov_lock);

	return ret;
}
5316
5317/*
5318 * This routine serves two purposes.
5319 * 1. Propagate overlay_id returned from sorted list to original list
5320 * to user-space.
5321 * 2. In case of error processing sorted list, map the error overlay's
5322 * index to original list because user-space is not aware of the sorted list.
5323 */
5324static int __mdss_overlay_map(struct mdp_overlay *ovs,
5325 struct mdp_overlay *op_ovs, int num_ovs, int num_ovs_processed)
5326{
5327 int mapped = num_ovs_processed;
5328 int j, k;
5329
5330 for (j = 0; j < num_ovs; j++) {
5331 for (k = 0; k < num_ovs; k++) {
5332 if ((ovs[j].dst_rect.x == op_ovs[k].dst_rect.x) &&
5333 (ovs[j].z_order == op_ovs[k].z_order)) {
5334 op_ovs[k].id = ovs[j].id;
5335 op_ovs[k].priority = ovs[j].priority;
5336 break;
5337 }
5338 }
5339
5340 if ((mapped != num_ovs) && (mapped == j)) {
5341 pr_debug("mapped %d->%d\n", mapped, k);
5342 mapped = k;
5343 }
5344 }
5345
5346 return mapped;
5347}
5348
5349static inline void __overlay_swap_func(void *a, void *b, int size)
5350{
5351 swap(*(struct mdp_overlay *)a, *(struct mdp_overlay *)b);
5352}
5353
5354static inline int __zorder_dstx_cmp_func(const void *a, const void *b)
5355{
5356 int rc = 0;
5357 const struct mdp_overlay *ov1 = a;
5358 const struct mdp_overlay *ov2 = b;
5359
5360 if (ov1->z_order < ov2->z_order)
5361 rc = -1;
5362 else if ((ov1->z_order == ov2->z_order) &&
5363 (ov1->dst_rect.x < ov2->dst_rect.x))
5364 rc = -1;
5365
5366 return rc;
5367}
5368
5369/*
5370 * first sort list of overlays based on z_order and then within
5371 * same z_order sort them on dst_x.
5372 */
5373static int __mdss_overlay_src_split_sort(struct msm_fb_data_type *mfd,
5374 struct mdp_overlay *ovs, int num_ovs)
5375{
5376 int i;
5377 int left_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
5378 int right_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
5379 u32 left_lm_w = left_lm_w_from_mfd(mfd);
5380
5381 sort(ovs, num_ovs, sizeof(struct mdp_overlay), __zorder_dstx_cmp_func,
5382 __overlay_swap_func);
5383
5384 for (i = 0; i < num_ovs; i++) {
5385 if (ovs[i].z_order >= MDSS_MDP_MAX_STAGE) {
5386 pr_err("invalid stage:%u\n", ovs[i].z_order);
5387 return -EINVAL;
5388 }
5389 if (ovs[i].dst_rect.x < left_lm_w) {
5390 if (left_lm_zo_cnt[ovs[i].z_order] == 2) {
5391 pr_err("more than 2 ov @ stage%u on left lm\n",
5392 ovs[i].z_order);
5393 return -EINVAL;
5394 }
5395 left_lm_zo_cnt[ovs[i].z_order]++;
5396 } else {
5397 if (right_lm_zo_cnt[ovs[i].z_order] == 2) {
5398 pr_err("more than 2 ov @ stage%u on right lm\n",
5399 ovs[i].z_order);
5400 return -EINVAL;
5401 }
5402 right_lm_zo_cnt[ovs[i].z_order]++;
5403 }
5404 }
5405
5406 return 0;
5407}
5408
5409static int __handle_overlay_prepare(struct msm_fb_data_type *mfd,
5410 struct mdp_overlay_list *ovlist, struct mdp_overlay *ip_ovs)
5411{
5412 int ret, i;
5413 int new_reqs = 0, left_cnt = 0, right_cnt = 0;
5414 int num_ovs = ovlist->num_overlays;
5415 u32 left_lm_w = left_lm_w_from_mfd(mfd);
5416 u32 left_lm_ovs = 0, right_lm_ovs = 0;
5417 bool is_single_layer = false;
5418
5419 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
5420 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
5421
5422 struct mdp_overlay *sorted_ovs = NULL;
5423 struct mdp_overlay *req, *prev_req;
5424
5425 struct mdss_mdp_pipe *pipe, *left_blend_pipe;
5426 struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = { 0 };
5427 struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = { 0 };
5428
5429 bool sort_needed = mdata->has_src_split && (num_ovs > 1);
5430
5431 ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
5432 if (ret)
5433 return ret;
5434
5435 if (mdss_fb_is_power_off(mfd)) {
5436 mutex_unlock(&mdp5_data->ov_lock);
5437 return -EPERM;
5438 }
5439
5440 if (sort_needed) {
5441 sorted_ovs = kcalloc(num_ovs, sizeof(*ip_ovs), GFP_KERNEL);
5442 if (!sorted_ovs) {
5443 pr_err("error allocating ovlist mem\n");
5444 return -ENOMEM;
5445 }
5446 memcpy(sorted_ovs, ip_ovs, num_ovs * sizeof(*ip_ovs));
5447 ret = __mdss_overlay_src_split_sort(mfd, sorted_ovs, num_ovs);
5448 if (ret) {
5449 pr_err("src_split_sort failed. ret=%d\n", ret);
5450 kfree(sorted_ovs);
5451 return ret;
5452 }
5453 }
5454
5455 pr_debug("prepare fb%d num_ovs=%d\n", mfd->index, num_ovs);
5456
5457 for (i = 0; i < num_ovs; i++) {
5458 if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
5459 left_lm_w))
5460 right_lm_ovs++;
5461 else
5462 left_lm_ovs++;
5463
5464 if ((left_lm_ovs > 1) && (right_lm_ovs > 1))
5465 break;
5466 }
5467
5468 for (i = 0; i < num_ovs; i++) {
5469 left_blend_pipe = NULL;
5470
5471 if (sort_needed) {
5472 req = &sorted_ovs[i];
5473 prev_req = (i > 0) ? &sorted_ovs[i - 1] : NULL;
5474
5475 /*
5476 * check if current overlay is at same z_order as
5477 * previous one and qualifies as a right blend. If yes,
5478 * pass a pointer to the pipe representing previous
5479 * overlay or in other terms left blend overlay.
5480 */
5481 if (prev_req && (prev_req->z_order == req->z_order) &&
5482 is_ov_right_blend(&prev_req->dst_rect,
5483 &req->dst_rect, left_lm_w)) {
5484 left_blend_pipe = pipe;
5485 }
5486 } else {
5487 req = &ip_ovs[i];
5488 }
5489
5490 if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
5491 left_lm_w))
5492 is_single_layer = (right_lm_ovs == 1);
5493 else
5494 is_single_layer = (left_lm_ovs == 1);
5495
5496 req->z_order += MDSS_MDP_STAGE_0;
5497 ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe,
5498 left_blend_pipe, is_single_layer);
5499 req->z_order -= MDSS_MDP_STAGE_0;
5500
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305501 if (IS_ERR_VALUE((unsigned long)ret))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305502 goto validate_exit;
5503
5504 pr_debug("pnum:%d id:0x%x flags:0x%x dst_x:%d l_blend_pnum%d\n",
5505 pipe->num, req->id, req->flags, req->dst_rect.x,
5506 left_blend_pipe ? left_blend_pipe->num : -1);
5507
5508 /* keep track of the new overlays to unset in case of errors */
5509 if (pipe->play_cnt == 0)
5510 new_reqs |= pipe->ndx;
5511
5512 if (IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w)) {
5513 if (right_cnt >= MAX_PIPES_PER_LM) {
5514 pr_err("too many pipes on right mixer\n");
5515 ret = -EINVAL;
5516 goto validate_exit;
5517 }
5518 right_plist[right_cnt] = pipe;
5519 right_cnt++;
5520 } else {
5521 if (left_cnt >= MAX_PIPES_PER_LM) {
5522 pr_err("too many pipes on left mixer\n");
5523 ret = -EINVAL;
5524 goto validate_exit;
5525 }
5526 left_plist[left_cnt] = pipe;
5527 left_cnt++;
5528 }
5529 }
5530
5531 ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
5532 right_plist, right_cnt);
5533
5534validate_exit:
5535 if (sort_needed)
5536 ovlist->processed_overlays =
5537 __mdss_overlay_map(sorted_ovs, ip_ovs, num_ovs, i);
5538 else
5539 ovlist->processed_overlays = i;
5540
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305541 if (IS_ERR_VALUE((unsigned long)ret)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305542 pr_debug("err=%d total_ovs:%d processed:%d left:%d right:%d\n",
5543 ret, num_ovs, ovlist->processed_overlays, left_lm_ovs,
5544 right_lm_ovs);
5545 mdss_mdp_overlay_release(mfd, new_reqs);
5546 }
5547 mutex_unlock(&mdp5_data->ov_lock);
5548
5549 kfree(sorted_ovs);
5550
5551 return ret;
5552}
5553
5554static int __handle_ioctl_overlay_prepare(struct msm_fb_data_type *mfd,
5555 void __user *argp)
5556{
5557 struct mdp_overlay_list ovlist;
5558 struct mdp_overlay *req_list[OVERLAY_MAX];
5559 struct mdp_overlay *overlays;
5560 int i, ret;
5561
5562 if (!mfd_to_ctl(mfd))
5563 return -ENODEV;
5564
5565 if (copy_from_user(&ovlist, argp, sizeof(ovlist)))
5566 return -EFAULT;
5567
5568 if (ovlist.num_overlays > OVERLAY_MAX) {
5569 pr_err("Number of overlays exceeds max\n");
5570 return -EINVAL;
5571 }
5572
5573 overlays = kmalloc_array(ovlist.num_overlays, sizeof(*overlays),
5574 GFP_KERNEL);
5575 if (!overlays)
5576 return -ENOMEM;
5577
5578 if (copy_from_user(req_list, ovlist.overlay_list,
5579 sizeof(struct mdp_overlay *) *
5580 ovlist.num_overlays)) {
5581 ret = -EFAULT;
5582 goto validate_exit;
5583 }
5584
5585 for (i = 0; i < ovlist.num_overlays; i++) {
5586 if (copy_from_user(overlays + i, req_list[i],
5587 sizeof(struct mdp_overlay))) {
5588 ret = -EFAULT;
5589 goto validate_exit;
5590 }
5591 }
5592
5593 ret = __handle_overlay_prepare(mfd, &ovlist, overlays);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305594 if (!IS_ERR_VALUE((unsigned long)ret)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305595 for (i = 0; i < ovlist.num_overlays; i++) {
5596 if (copy_to_user(req_list[i], overlays + i,
5597 sizeof(struct mdp_overlay))) {
5598 ret = -EFAULT;
5599 goto validate_exit;
5600 }
5601 }
5602 }
5603
5604 if (copy_to_user(argp, &ovlist, sizeof(ovlist)))
5605 ret = -EFAULT;
5606
5607validate_exit:
5608 kfree(overlays);
5609
5610 return ret;
5611}
5612
/*
 * Top-level overlay ioctl dispatcher for this fb device. Copies per-command
 * argument structs across the user boundary and routes to the matching
 * handler. Commands not listed fall through with -ENOTSUPP.
 *
 * NOTE(review): several cases return the raw (positive) byte count from a
 * failed copy_from_user()/copy_to_user() instead of -EFAULT; preserved
 * as-is since callers/user space may rely on the existing behavior.
 */
static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
					  u32 cmd, void __user *argp)
{
	struct mdp_overlay *req = NULL;
	int val, ret = -ENOTSUPP;
	struct msmfb_metadata metadata;
	struct mdp_pp_feature_version pp_feature_version;
	struct msmfb_overlay_data data;
	struct mdp_set_cfg cfg;

	switch (cmd) {
	case MSMFB_MDP_PP:
		ret = mdss_mdp_pp_ioctl(mfd, argp);
		break;
	case MSMFB_MDP_PP_GET_FEATURE_VERSION:
		ret = copy_from_user(&pp_feature_version, argp,
				     sizeof(pp_feature_version));
		if (ret) {
			pr_err("copy_from_user failed for pp_feature_version\n");
			ret = -EFAULT;
		} else {
			ret = mdss_mdp_pp_get_version(&pp_feature_version);
			if (!ret) {
				ret = copy_to_user(argp, &pp_feature_version,
						sizeof(pp_feature_version));
				if (ret) {
					pr_err("copy_to_user failed for pp_feature_version\n");
					ret = -EFAULT;
				}
			} else {
				pr_err("get pp version failed ret %d\n", ret);
			}
		}
		break;
	case MSMFB_HISTOGRAM_START:
	case MSMFB_HISTOGRAM_STOP:
	case MSMFB_HISTOGRAM:
		ret = mdss_mdp_histo_ioctl(mfd, cmd, argp);
		break;

	case MSMFB_OVERLAY_GET:
		/* req is freed at the common exit below */
		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
		if (!req)
			return -ENOMEM;
		ret = copy_from_user(req, argp, sizeof(*req));
		if (!ret) {
			ret = mdss_mdp_overlay_get(mfd, req);

			if (!IS_ERR_VALUE((unsigned long)ret))
				ret = copy_to_user(argp, req, sizeof(*req));
		}

		if (ret)
			pr_debug("OVERLAY_GET failed (%d)\n", ret);
		break;

	case MSMFB_OVERLAY_SET:
		req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
		if (!req)
			return -ENOMEM;
		ret = copy_from_user(req, argp, sizeof(*req));
		if (!ret) {
			ret = mdss_mdp_overlay_set(mfd, req);

			if (!IS_ERR_VALUE((unsigned long)ret))
				ret = copy_to_user(argp, req, sizeof(*req));
		}
		if (ret)
			pr_debug("OVERLAY_SET failed (%d)\n", ret);
		break;

	case MSMFB_OVERLAY_UNSET:
		if (!IS_ERR_VALUE(copy_from_user(&val, argp, sizeof(val))))
			ret = mdss_mdp_overlay_unset(mfd, val);
		break;

	case MSMFB_OVERLAY_PLAY:
		ret = copy_from_user(&data, argp, sizeof(data));
		if (!ret)
			ret = mdss_mdp_overlay_play(mfd, &data);

		if (ret)
			pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
		break;

	case MSMFB_OVERLAY_VSYNC_CTRL:
		if (!copy_from_user(&val, argp, sizeof(val))) {
			ret = mdss_mdp_overlay_vsync_ctrl(mfd, val);
		} else {
			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed (%d)\n", ret);
			ret = -EFAULT;
		}
		break;

	case MSMFB_METADATA_SET:
		ret = copy_from_user(&metadata, argp, sizeof(metadata));
		if (ret)
			return ret;
		ret = mdss_fb_set_metadata(mfd, &metadata);
		break;

	case MSMFB_METADATA_GET:
		ret = copy_from_user(&metadata, argp, sizeof(metadata));
		if (ret)
			return ret;
		ret = mdss_fb_get_metadata(mfd, &metadata);
		if (!ret)
			ret = copy_to_user(argp, &metadata, sizeof(metadata));
		break;

	case MSMFB_OVERLAY_PREPARE:
		ret = __handle_ioctl_overlay_prepare(mfd, argp);
		break;
	case MSMFB_MDP_SET_CFG:
		ret = copy_from_user(&cfg, argp, sizeof(cfg));
		if (ret) {
			pr_err("copy failed MSMFB_MDP_SET_CFG ret %d\n", ret);
			ret = -EFAULT;
			break;
		}
		ret = mdss_mdp_set_cfg(mfd, &cfg);
		break;

	default:
		break;
	}

	/* kfree(NULL) is a no-op for commands that never allocate req */
	kfree(req);
	return ret;
}
5743
5744/**
5745 * __mdss_mdp_overlay_ctl_init - Helper function to initialize control structure
5746 * @mfd: msm frame buffer data structure associated with the fb device.
5747 *
5748 * Helper function that allocates and initializes the mdp control structure
5749 * for a frame buffer device. Whenever applicable, this function will also setup
5750 * the control for the split display path as well.
5751 *
5752 * Return: pointer to the newly allocated control structure.
5753 */
5754static struct mdss_mdp_ctl *__mdss_mdp_overlay_ctl_init(
5755 struct msm_fb_data_type *mfd)
5756{
5757 int rc = 0;
5758 struct mdss_mdp_ctl *ctl;
5759 struct mdss_panel_data *pdata;
5760 struct mdss_overlay_private *mdp5_data;
5761
5762 if (!mfd)
5763 return ERR_PTR(-EINVAL);
5764
5765 pdata = dev_get_platdata(&mfd->pdev->dev);
5766 if (!pdata) {
5767 pr_err("no panel connected for fb%d\n", mfd->index);
5768 rc = -ENODEV;
5769 goto error;
5770 }
5771
5772 mdp5_data = mfd_to_mdp5_data(mfd);
5773 if (!mdp5_data) {
5774 rc = -EINVAL;
5775 goto error;
5776 }
5777
5778 ctl = mdss_mdp_ctl_init(pdata, mfd);
5779 if (IS_ERR_OR_NULL(ctl)) {
5780 pr_err("Unable to initialize ctl for fb%d\n",
5781 mfd->index);
5782 rc = PTR_ERR(ctl);
5783 goto error;
5784 }
5785 ctl->is_master = true;
5786 ctl->vsync_handler.vsync_handler =
5787 mdss_mdp_overlay_handle_vsync;
5788 ctl->vsync_handler.cmd_post_flush = false;
5789
5790 ctl->recover_underrun_handler.vsync_handler =
5791 mdss_mdp_recover_underrun_handler;
5792 ctl->recover_underrun_handler.cmd_post_flush = false;
5793
5794 ctl->frc_vsync_handler.vsync_handler =
5795 mdss_mdp_overlay_frc_handler;
5796 ctl->frc_vsync_handler.cmd_post_flush = false;
5797
5798 ctl->lineptr_handler.lineptr_handler =
5799 mdss_mdp_overlay_handle_lineptr;
5800
5801 INIT_WORK(&ctl->remove_underrun_handler,
5802 remove_underrun_vsync_handler);
5803
5804 if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
5805 /* enable split display */
5806 rc = mdss_mdp_ctl_split_display_setup(ctl, pdata->next);
5807 if (rc) {
5808 mdss_mdp_ctl_destroy(ctl);
5809 goto error;
5810 }
5811 }
5812
5813 mdp5_data->ctl = ctl;
5814error:
5815 if (rc)
5816 return ERR_PTR(rc);
5817 else
5818 return ctl;
5819}
5820
5821static void mdss_mdp_set_lm_flag(struct msm_fb_data_type *mfd)
5822{
5823 u32 width;
5824 struct mdss_data_type *mdata;
5825
5826 /* if lm_widths are set, the split_mode would have been set */
5827 if (mfd->panel_info->lm_widths[0] && mfd->panel_info->lm_widths[1])
5828 return;
5829
5830 mdata = mdss_mdp_get_mdata();
5831 width = mfd->fbi->var.xres;
5832
5833 /* setting the appropriate split_mode for HDMI usecases */
5834 if ((mfd->split_mode == MDP_SPLIT_MODE_NONE ||
5835 mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) &&
5836 (width > mdata->max_mixer_width)) {
5837 width /= 2;
5838 mfd->split_mode = MDP_DUAL_LM_SINGLE_DISPLAY;
5839 mfd->split_fb_left = width;
5840 mfd->split_fb_right = width;
5841 } else if (is_dual_lm_single_display(mfd) &&
5842 (width <= mdata->max_mixer_width)) {
5843 mfd->split_mode = MDP_SPLIT_MODE_NONE;
5844 mfd->split_fb_left = 0;
5845 mfd->split_fb_right = 0;
5846 }
5847}
5848
/*
 * Recover from a blank that arrives while a mode switch is in flight:
 * reset the switch state and, for command-mode panels, wait for both
 * pingpong done signals before moving in-use buffers to the freelist.
 */
static void mdss_mdp_handle_invalid_switch_state(struct msm_fb_data_type *mfd)
{
	int rc = 0;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
	struct mdss_mdp_data *buf, *tmpbuf;

	mfd->switch_state = MDSS_MDP_NO_UPDATE_REQUESTED;

	/*
	 * Handle only for cmd mode panels as for video mode, buffers
	 * cannot be freed at this point. Needs revisting to handle the
	 * use case for video mode panels.
	 */
	if (mfd->panel_info->type == MIPI_CMD_PANEL) {
		/* wait on both ctl paths before touching the buffer lists */
		if (ctl->ops.wait_pingpong)
			rc = ctl->ops.wait_pingpong(ctl, NULL);
		if (!rc && sctl && sctl->ops.wait_pingpong)
			rc = sctl->ops.wait_pingpong(sctl, NULL);
		if (rc) {
			pr_err("wait for pp failed\n");
			return;
		}

		mutex_lock(&mdp5_data->list_lock);
		list_for_each_entry_safe(buf, tmpbuf,
					&mdp5_data->bufs_used, buf_list)
			list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
		mutex_unlock(&mdp5_data->list_lock);
	}
}
5881
5882static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
5883{
5884 int rc;
5885 struct mdss_overlay_private *mdp5_data;
5886 struct mdss_mdp_ctl *ctl = NULL;
5887 struct mdss_data_type *mdata;
5888
5889 if (!mfd)
5890 return -ENODEV;
5891
5892 if (mfd->key != MFD_KEY)
5893 return -EINVAL;
5894
5895 mdp5_data = mfd_to_mdp5_data(mfd);
5896 if (!mdp5_data)
5897 return -EINVAL;
5898
5899 mdata = mfd_to_mdata(mfd);
5900 if (!mdata)
5901 return -EINVAL;
5902
5903 mdss_mdp_set_lm_flag(mfd);
5904
5905 if (!mdp5_data->ctl) {
5906 ctl = __mdss_mdp_overlay_ctl_init(mfd);
5907 if (IS_ERR_OR_NULL(ctl))
5908 return PTR_ERR(ctl);
5909 } else {
5910 ctl = mdp5_data->ctl;
5911 }
5912
5913 if (mfd->panel_info->type == WRITEBACK_PANEL && !mdp5_data->wfd) {
5914 mdp5_data->wfd = mdss_mdp_wfd_init(&mfd->pdev->dev, ctl);
5915 if (IS_ERR_OR_NULL(mdp5_data->wfd)) {
5916 rc = PTR_ERR(mdp5_data->wfd);
5917 goto panel_on;
5918 }
5919 }
5920
5921 if (mdss_fb_is_power_on(mfd)) {
5922 pr_debug("panel was never turned off\n");
5923 rc = mdss_mdp_ctl_start(ctl, false);
5924 goto panel_on;
5925 }
5926
5927 rc = mdss_mdp_ctl_intf_event(mdp5_data->ctl, MDSS_EVENT_RESET,
5928 NULL, false);
5929 if (rc)
5930 goto panel_on;
5931
5932 /* Skip the overlay start and kickoff for all displays
5933 * if handoff is pending. Previously we skipped it for DTV
5934 * panel and pluggable panels (bridge chip hdmi case). But
5935 * it does not cover the case where there is a non pluggable
5936 * tertiary display. Using the flag handoff_pending to skip
5937 * overlay start and kickoff should cover all cases
5938 * TODO: In the long run, the overlay start and kickoff
5939 * should not be skipped, instead, the handoff can be done
5940 */
5941 if (!mfd->panel_info->cont_splash_enabled &&
5942 !mdata->handoff_pending) {
5943 rc = mdss_mdp_overlay_start(mfd);
5944 if (rc)
5945 goto end;
Krishna Manikandanb296a2b2018-03-21 17:16:31 +05305946 if (mfd->panel_info->type != WRITEBACK_PANEL)
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305947 rc = mdss_mdp_overlay_kickoff(mfd, NULL);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305948 } else {
5949 rc = mdss_mdp_ctl_setup(ctl);
5950 if (rc)
5951 goto end;
5952 }
5953
5954panel_on:
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05305955 if (IS_ERR_VALUE((unsigned long)rc)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05305956 pr_err("Failed to turn on fb%d\n", mfd->index);
5957 mdss_mdp_overlay_off(mfd);
5958 goto end;
5959 }
5960
5961end:
5962 return rc;
5963}
5964
/*
 * Tear down a ctl that was handed off from the bootloader: free the
 * splash fb pipe, flush any pipes still staged via a kickoff, stop the
 * ctl, purge free lists if the panel is off, and release splash memory.
 *
 * Returns the result of mdss_mdp_splash_cleanup() (note: this overwrites
 * the earlier ctl_stop() result).
 */
static int mdss_mdp_handoff_cleanup_ctl(struct msm_fb_data_type *mfd)
{
	int rc;
	int need_cleanup;
	struct mdss_overlay_private *mdp5_data;

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);

	mdss_mdp_overlay_free_fb_pipe(mfd);

	mutex_lock(&mdp5_data->list_lock);
	need_cleanup = !list_empty(&mdp5_data->pipes_cleanup) ||
		!list_empty(&mdp5_data->pipes_used);
	mutex_unlock(&mdp5_data->list_lock);

	/* kickoff flushes the cleanup list through the normal commit path */
	if (need_cleanup)
		mdss_mdp_overlay_kickoff(mfd, NULL);

	rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
	if (!rc) {
		if (mdss_fb_is_power_off(mfd)) {
			mutex_lock(&mdp5_data->list_lock);
			__mdss_mdp_overlay_free_list_purge(mfd);
			mutex_unlock(&mdp5_data->list_lock);
		}
	}

	rc = mdss_mdp_splash_cleanup(mfd, false);
	if (rc)
		pr_err("%s: failed splash clean up %d\n", __func__, rc);

	return rc;
}
6004
/*
 * Power-down path for the overlay subsystem of one fb device.
 *
 * Frees the splash fb pipe and cursor state, flushes pending pipe
 * cleanup via a final kickoff, drains outstanding retire fences (waiting
 * up to one vsync before force-signaling), then stops the ctl. When the
 * panel is fully off this also purges buffer lists, optionally destroys
 * the ctl, drops the active interface count and releases runtime-pm
 * references. Low-power (LP) states skip straight to ctl_stop, keeping
 * the overlay context alive.
 */
static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
{
	int rc;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_mixer *mixer;
	int need_cleanup;
	int retire_cnt;
	bool destroy_ctl = false;

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl) {
		pr_err("ctl not initialized\n");
		return -ENODEV;
	}

	/*
	 * Keep a reference to the runtime pm until the overlay is turned
	 * off, and then release this last reference at the end. This will
	 * help in distinguishing between idle power collapse versus suspend
	 * power collapse
	 */
	pm_runtime_get_sync(&mfd->pdev->dev);

	if (mdss_fb_is_power_on_lp(mfd)) {
		pr_debug("panel not turned off. keeping overlay on\n");
		goto ctl_stop;
	}

	mutex_lock(&mdp5_data->ov_lock);

	mdss_mdp_overlay_free_fb_pipe(mfd);

	/* disable cursors on both mixers before stopping the ctl */
	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
	if (mixer)
		mixer->cursor_enabled = 0;

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_RIGHT);
	if (mixer)
		mixer->cursor_enabled = 0;

	mutex_lock(&mdp5_data->list_lock);
	need_cleanup = !list_empty(&mdp5_data->pipes_cleanup);
	mutex_unlock(&mdp5_data->list_lock);
	mutex_unlock(&mdp5_data->ov_lock);

	destroy_ctl = !mfd->ref_cnt || mfd->panel_reconfig;

	mutex_lock(&mfd->switch_lock);
	if (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED) {
		/* blanking mid mode-switch: force ctl teardown and recover */
		destroy_ctl = true;
		need_cleanup = false;
		pr_warn("fb%d blank while mode switch (%d) in progress\n",
				mfd->index, mfd->switch_state);
		mdss_mdp_handle_invalid_switch_state(mfd);
	}
	mutex_unlock(&mfd->switch_lock);

	if (need_cleanup) {
		pr_debug("cleaning up pipes on fb%d\n", mfd->index);
		if (mdata->handoff_pending)
			mdp5_data->allow_kickoff = true;

		mdss_mdp_overlay_kickoff(mfd, NULL);
	} else if (!mdss_mdp_ctl_is_power_on(mdp5_data->ctl)) {
		if (mfd->panel_reconfig) {
			if (mfd->panel_info->cont_splash_enabled)
				mdss_mdp_handoff_cleanup_ctl(mfd);

			mdp5_data->borderfill_enable = false;
			mdss_mdp_ctl_destroy(mdp5_data->ctl);
			mdp5_data->ctl = NULL;
		}
		goto end;
	}

	/*
	 * If retire fences are still active wait for a vsync time
	 * for retire fence to be updated.
	 * As a last resort signal the timeline if vsync doesn't arrive.
	 */
	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
	retire_cnt = mdp5_data->retire_cnt;
	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
	if (retire_cnt) {
		u32 fps = mdss_panel_get_framerate(mfd->panel_info,
				FPS_RESOLUTION_HZ);
		u32 vsync_time = 1000 / (fps ? : DEFAULT_FRAME_RATE);

		msleep(vsync_time);

		mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
		retire_cnt = mdp5_data->retire_cnt;
		mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
		__vsync_retire_signal(mfd, retire_cnt);

		/*
		 * the retire work can still schedule after above retire_signal
		 * api call. Flush workqueue guarantees that current caller
		 * context is blocked till retire_work finishes. Any work
		 * schedule after flush call should not cause any issue because
		 * retire_signal api checks for retire_cnt with sync_mutex lock.
		 */

		kthread_flush_work(&mdp5_data->vsync_work);
	}

ctl_stop:
	mutex_lock(&mdp5_data->ov_lock);
	/* set the correct pipe_mapped before ctl_stop */
	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
			MDSS_MDP_MIXER_MUX_LEFT);
	mdss_mdp_mixer_update_pipe_map(mdp5_data->ctl,
			MDSS_MDP_MIXER_MUX_RIGHT);
	rc = mdss_mdp_ctl_stop(mdp5_data->ctl, mfd->panel_power_state);
	if (rc == 0) {
		if (mdss_fb_is_power_off(mfd)) {
			mutex_lock(&mdp5_data->list_lock);
			__mdss_mdp_overlay_free_list_purge(mfd);
			if (!mfd->ref_cnt)
				mdss_mdp_overlay_buf_deinit(mfd);
			mutex_unlock(&mdp5_data->list_lock);
			mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
					&mfd->mdp_sync_pt_data.notifier);

			if (destroy_ctl) {
				mdp5_data->borderfill_enable = false;
				mdss_mdp_ctl_destroy(mdp5_data->ctl);
				mdp5_data->ctl = NULL;
			}

			atomic_dec(&mdp5_data->mdata->active_intf_cnt);

			/*
			 * drop the idle-pc runtime reference unless a cmd
			 * panel with idle power collapse keeps it
			 */
			if (!mdp5_data->mdata->idle_pc_enabled ||
				(mfd->panel_info->type != MIPI_CMD_PANEL)) {
				rc = pm_runtime_put(&mfd->pdev->dev);
				if (rc)
					pr_err("unable to suspend w/pm_runtime_put (%d)\n",
						rc);
			}
		}
	}
	mutex_unlock(&mdp5_data->ov_lock);

	if (mdp5_data->wfd) {
		mdss_mdp_wfd_deinit(mdp5_data->wfd);
		mdp5_data->wfd = NULL;
	}

end:
	/* Release the last reference to the runtime device */
	rc = pm_runtime_put(&mfd->pdev->dev);
	if (rc)
		pr_err("unable to suspend w/pm_runtime_put (%d)\n", rc);

	return rc;
}
6169
/*
 * Decode the LAYER registers of every interface mixer on @ctl to find
 * pipes the bootloader left staged, and hand each one (and its mixer
 * association) off to the kernel driver's software state.
 */
static int __mdss_mdp_ctl_handoff(struct msm_fb_data_type *mfd,
	struct mdss_mdp_ctl *ctl, struct mdss_data_type *mdata)
{
	int rc = 0;
	int i, j;
	u32 mixercfg;
	struct mdss_mdp_pipe *pipe = NULL;
	struct mdss_overlay_private *mdp5_data;

	if (!ctl || !mdata)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);

	for (i = 0; i < mdata->nmixers_intf; i++) {
		mixercfg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
		pr_debug("for lm%d mixercfg = 0x%09x\n", i, mixercfg);

		/* each pipe occupies a 3-bit stage field in the mixer cfg */
		j = MDSS_MDP_SSPP_VIG0;
		for (; j < MDSS_MDP_SSPP_CURSOR0 && mixercfg; j++) {
			u32 cfg = j * 3;

			if ((j == MDSS_MDP_SSPP_VIG3) ||
					(j == MDSS_MDP_SSPP_RGB3)) {
				/* Add 2 to account for Cursor & Border bits */
				cfg += 2;
			}
			if (mixercfg & (0x7 << cfg)) {
				pr_debug("Pipe %d staged\n", j);
				/* bootloader display always uses RECT0 */
				pipe = mdss_mdp_pipe_search(mdata, BIT(j),
						MDSS_MDP_PIPE_RECT0);
				if (!pipe) {
					pr_warn("Invalid pipe %d staged\n", j);
					continue;
				}

				rc = mdss_mdp_pipe_handoff(pipe);
				if (rc) {
					pr_err("Failed to handoff pipe%d\n",
							pipe->num);
					goto exit;
				}

				pipe->mfd = mfd;
				mutex_lock(&mdp5_data->list_lock);
				list_add(&pipe->list, &mdp5_data->pipes_used);
				mutex_unlock(&mdp5_data->list_lock);

				rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
				if (rc) {
					pr_err("failed to handoff mix%d\n", i);
					goto exit;
				}
			}
		}
	}
exit:
	return rc;
}
6230
/**
 * mdss_mdp_overlay_handoff() - Read MDP registers to handoff an active ctl path
 * @mfd: Msm frame buffer structure associated with the fb device.
 *
 * This function populates the MDP software structures with the current state of
 * the MDP hardware to handoff any active control path for the framebuffer
 * device. This is needed to identify any ctl, mixers and pipes being set up by
 * the bootloader to display the splash screen when the continuous splash screen
 * feature is enabled in kernel.
 *
 * Return: 0 on success. On failure, every partially handed-off pipe is
 * cleaned up, the ctl is destroyed, and a negative errno is returned.
 */
static int mdss_mdp_overlay_handoff(struct msm_fb_data_type *mfd)
{
	int rc = 0;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = NULL;
	struct mdss_mdp_ctl *sctl = NULL;

	/* reuse an existing ctl if one was already created for this fb */
	if (!mdp5_data->ctl) {
		ctl = __mdss_mdp_overlay_ctl_init(mfd);
		if (IS_ERR_OR_NULL(ctl)) {
			rc = PTR_ERR(ctl);
			goto error;
		}
	} else {
		ctl = mdp5_data->ctl;
	}

	/*
	 * vsync interrupt needs on during continuous splash, this is
	 * to initialize necessary ctl members here.
	 */
	rc = mdss_mdp_ctl_start(ctl, true);
	if (rc) {
		pr_err("Failed to initialize ctl\n");
		goto error;
	}

	ctl->clk_rate = mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false);
	pr_debug("Set the ctl clock rate to %d Hz\n", ctl->clk_rate);

	/* adopt pipes/mixers staged on the primary control path */
	rc = __mdss_mdp_ctl_handoff(mfd, ctl, mdata);
	if (rc) {
		pr_err("primary ctl handoff failed. rc=%d\n", rc);
		goto error;
	}

	if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
		/* dual-LM dual-display also needs the secondary ctl */
		sctl = mdss_mdp_get_split_ctl(ctl);
		if (!sctl) {
			pr_err("cannot get secondary ctl. fail the handoff\n");
			rc = -EPERM;
			goto error;
		}
		rc = __mdss_mdp_ctl_handoff(mfd, sctl, mdata);
		if (rc) {
			pr_err("secondary ctl handoff failed. rc=%d\n", rc);
			goto error;
		}
	}

	/* take over SMP client allocations; failure is logged but not fatal */
	rc = mdss_mdp_smp_handoff(mdata);
	if (rc)
		pr_err("Failed to handoff smps\n");

	mdp5_data->handoff = true;

error:
	if (rc && ctl) {
		/* undo any partial handoff so pipes can be reused cleanly */
		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
		mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
		mdss_mdp_ctl_destroy(ctl);
		mdp5_data->ctl = NULL;
		mdp5_data->handoff = false;
	}

	return rc;
}
6310
6311static void __vsync_retire_handle_vsync(struct mdss_mdp_ctl *ctl, ktime_t t)
6312{
6313 struct msm_fb_data_type *mfd = ctl->mfd;
6314 struct mdss_overlay_private *mdp5_data;
6315
6316 if (!mfd || !mfd->mdp.private1) {
6317 pr_warn("Invalid handle for vsync\n");
6318 return;
6319 }
6320
6321 mdp5_data = mfd_to_mdp5_data(mfd);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05306322 kthread_queue_work(&mdp5_data->worker, &mdp5_data->vsync_work);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05306323}
6324
6325static void __vsync_retire_work_handler(struct kthread_work *work)
6326{
6327 struct mdss_overlay_private *mdp5_data =
6328 container_of(work, typeof(*mdp5_data), vsync_work);
6329
6330 if (!mdp5_data->ctl || !mdp5_data->ctl->mfd)
6331 return;
6332
6333 if (!mdp5_data->ctl->ops.remove_vsync_handler)
6334 return;
6335
6336 __vsync_retire_signal(mdp5_data->ctl->mfd, 1);
6337}
6338
/*
 * __vsync_retire_signal() - advance the retire timeline by @val steps
 * @mfd: framebuffer device owning the retire timeline
 * @val: number of retire fences to signal
 *
 * Signals up to @val outstanding retire fences under the sync mutex.
 * Once no retire fences remain, the retire vsync handler is removed so
 * vsync events stop queuing work; MDP clocks are explicitly held on
 * across the removal.
 */
static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);

	mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
	if (mdp5_data->retire_cnt > 0) {
		mdss_inc_timeline(mfd->mdp_sync_pt_data.timeline_retire, val);
		/* never let the count of outstanding fences go negative */
		mdp5_data->retire_cnt -= min(val, mdp5_data->retire_cnt);
		pr_debug("Retire signaled! timeline val=%d remaining=%d\n",
			mdss_get_timeline_retire_ts(
			mfd->mdp_sync_pt_data.timeline_retire),
			mdp5_data->retire_cnt);

		if (mdp5_data->retire_cnt == 0) {
			/* nothing pending: detach the retire vsync handler */
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
			mdp5_data->ctl->ops.remove_vsync_handler(mdp5_data->ctl,
					&mdp5_data->vsync_retire_handler);
			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
		}
	}
	mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
}
6361
/*
 * __vsync_retire_get_fence() - create a retire fence for the next commit
 * @sync_pt_data: sync point data embedded in the owning msm_fb_data_type
 *
 * Creates a fence on the retire timeline that signals after all currently
 * outstanding retire events plus one, and bumps the outstanding count.
 *
 * NOTE(review): retire_cnt is read-modified here without taking
 * sync_mutex — presumably the msm_sync_pt_data caller already holds it;
 * verify against mdss_fb before relying on this.
 *
 * Return: fence pointer on success, or ERR_PTR: -ENODEV without a ctl,
 * -EOPNOTSUPP when vsync handlers are unsupported, -EPERM while the
 * panel is off awaiting its first update.
 */
static struct mdss_fence *
__vsync_retire_get_fence(struct msm_sync_pt_data *sync_pt_data)
{
	struct msm_fb_data_type *mfd;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_mdp_ctl *ctl;
	int value;

	mfd = container_of(sync_pt_data, typeof(*mfd), mdp_sync_pt_data);
	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl)
		return ERR_PTR(-ENODEV);

	ctl = mdp5_data->ctl;
	if (!ctl->ops.add_vsync_handler)
		return ERR_PTR(-EOPNOTSUPP);

	if (!mdss_mdp_ctl_is_power_on(ctl)) {
		pr_debug("fb%d vsync pending first update\n", mfd->index);
		return ERR_PTR(-EPERM);
	}

	/* fence matures after all pending retires plus this frame's */
	value = 1 + mdp5_data->retire_cnt;
	mdp5_data->retire_cnt++;

	return mdss_fb_sync_get_fence(mfd->mdp_sync_pt_data.timeline_retire,
			"mdp-retire", value);
}
6391
6392static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd)
6393{
6394 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
6395 struct mdss_mdp_ctl *ctl;
6396 int rc;
6397 int retire_cnt;
6398
6399 ctl = mdp5_data->ctl;
6400 mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
6401 retire_cnt = mdp5_data->retire_cnt;
6402 mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
6403 if (!retire_cnt || mdp5_data->vsync_retire_handler.enabled)
6404 return 0;
6405
6406 if (!ctl->ops.add_vsync_handler)
6407 return -EOPNOTSUPP;
6408
6409 if (!mdss_mdp_ctl_is_power_on(ctl)) {
6410 pr_debug("fb%d vsync pending first update\n", mfd->index);
6411 return -EPERM;
6412 }
6413
6414 rc = ctl->ops.add_vsync_handler(ctl,
6415 &mdp5_data->vsync_retire_handler);
6416 return rc;
6417}
6418
/*
 * __vsync_retire_setup() - create the retire timeline and worker thread
 * @mfd: framebuffer device to set up
 *
 * Creates the per-fb "mdss_fbN_retire" sync timeline used for retire
 * fences. For command mode panels, and panels with dynamic mode switch,
 * it also spawns a kthread worker that signals retire fences from vsync
 * context and installs the retire get-fence/vsync-handler callbacks.
 *
 * Return: 0 on success, -ENOMEM if the timeline or worker thread cannot
 * be created.
 */
static int __vsync_retire_setup(struct msm_fb_data_type *mfd)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	char name[24];
	struct sched_param param = { .sched_priority = 5 };

	snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
	mfd->mdp_sync_pt_data.timeline_retire = mdss_create_timeline(name);
	if (mfd->mdp_sync_pt_data.timeline_retire == NULL) {
		pr_err("cannot vsync create time line");
		return -ENOMEM;
	}

	/*
	 * vsync_work is required only for command mode panels and for panels
	 * with dynamic mode switch supported. For all other panels the retire
	 * fence is signaled along with the release fence once the frame
	 * transfer is done.
	 */
	if ((mfd->panel_info->mipi.dms_mode) ||
	    (mfd->panel_info->type == MIPI_CMD_PANEL)) {
		kthread_init_worker(&mdp5_data->worker);
		kthread_init_work(&mdp5_data->vsync_work,
			__vsync_retire_work_handler);

		mdp5_data->thread = kthread_run(kthread_worker_fn,
			&mdp5_data->worker,
			"vsync_retire_work");
		if (IS_ERR(mdp5_data->thread)) {
			pr_err("unable to start vsync thread\n");
			mdp5_data->thread = NULL;
			return -ENOMEM;
		}

		/* run the worker at real-time priority (SCHED_FIFO, prio 5) */
		sched_setscheduler(mdp5_data->thread, SCHED_FIFO, &param);
		mfd->mdp_sync_pt_data.get_retire_fence =
			__vsync_retire_get_fence;
		mdp5_data->vsync_retire_handler.vsync_handler =
			__vsync_retire_handle_vsync;
		mdp5_data->vsync_retire_handler.cmd_post_flush = false;
	}

	return 0;
}
6463
/*
 * mdss_mdp_update_panel_info() - reconfigure ctl after a panel mode switch
 * @mfd:       framebuffer device being switched
 * @mode:      requested panel mode (non-zero = command, zero = video)
 * @dest_ctrl: when non-zero, destroy the current ctl so it gets fully
 *             re-created for the new mode; otherwise reconfigure in place
 *
 * Notifies the DSI driver of the updated panel data and then either
 * destroys or reconfigures the main (and any split) control path to
 * match the new panel configuration.
 *
 * Return: 0 on success (including the no-ctl case), or a negative errno
 * from split display setup.
 */
static int mdss_mdp_update_panel_info(struct msm_fb_data_type *mfd,
		int mode, int dest_ctrl)
{
	int ret = 0;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_panel_data *pdata;
	struct mdss_mdp_ctl *sctl;

	if (ctl == NULL) {
		pr_debug("ctl not initialized\n");
		return 0;
	}

	/* let the DSI layer update its panel data for the new mode */
	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
		(void *)(unsigned long)mode, CTL_INTF_EVENT_FLAG_DEFAULT);
	if (ret)
		pr_err("Dynamic switch to %s mode failed!\n",
			mode ? "command" : "video");

	if (dest_ctrl) {
		/*
		 * Destroy current ctrl structure as this is
		 * going to be re-initialized with the requested mode.
		 */
		mdss_mdp_ctl_destroy(mdp5_data->ctl);
		mdp5_data->ctl = NULL;
	} else {
		pdata = dev_get_platdata(&mfd->pdev->dev);

		if (mdp5_data->mdata->has_pingpong_split &&
		    pdata->panel_info.use_pingpong_split)
			mfd->split_mode = MDP_PINGPONG_SPLIT;
		/*
		 * Dynamic change so we need to reconfig instead of
		 * destroying current ctrl structure.
		 */
		mdss_mdp_ctl_reconfig(ctl, pdata);

		/*
		 * Set flag when dynamic resolution switch happens before
		 * handoff of cont-splash
		 */
		if (mdata->handoff_pending)
			ctl->switch_with_handoff = true;

		sctl = mdss_mdp_get_split_ctl(ctl);
		if (sctl) {
			if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
				mdss_mdp_ctl_reconfig(sctl, pdata->next);
				sctl->border_x_off +=
					pdata->panel_info.lcdc.border_left +
					pdata->panel_info.lcdc.border_right;
			} else {
				/*
				 * todo: need to revisit this and properly
				 * cleanup slave resources
				 */
				mdss_mdp_ctl_destroy(sctl);
				ctl->mixer_right = NULL;
			}
		} else if (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
			/* enable split display for the first time */
			ret = mdss_mdp_ctl_split_display_setup(ctl,
					pdata->next);
			if (ret) {
				mdss_mdp_ctl_destroy(ctl);
				mdp5_data->ctl = NULL;
			}
		}
	}

	return ret;
}
6539
6540int mdss_mdp_input_event_handler(struct msm_fb_data_type *mfd)
6541{
6542 int rc = 0;
6543 struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
6544
6545 if (ctl && mdss_panel_is_power_on(ctl->power_state) &&
6546 ctl->ops.early_wake_up_fnc)
6547 rc = ctl->ops.early_wake_up_fnc(ctl);
6548
6549 return rc;
6550}
6551
/*
 * mdss_mdp_signal_retire_fence() - flush @retire_cnt pending retire fences.
 * Installed as mdp5_interface->signal_retire_fence in mdss_mdp_overlay_init
 * so the fb core can force outstanding retire fences to signal.
 */
static void mdss_mdp_signal_retire_fence(struct msm_fb_data_type *mfd,
		int retire_cnt)
{
	__vsync_retire_signal(mfd, retire_cnt);
	pr_debug("Signaled (%d) pending retire fence\n", retire_cnt);
}
6558
6559int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
6560{
6561 struct device *dev = mfd->fbi->dev;
6562 struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
6563 struct mdss_overlay_private *mdp5_data = NULL;
6564 struct irq_info *mdss_irq;
6565 int rc;
6566
6567 mdp5_data = kcalloc(1, sizeof(struct mdss_overlay_private), GFP_KERNEL);
6568 if (!mdp5_data)
6569 return -ENOMEM;
6570
6571 mdp5_data->frc_fsm
6572 = kcalloc(1, sizeof(struct mdss_mdp_frc_fsm), GFP_KERNEL);
6573 if (!mdp5_data->frc_fsm) {
6574 rc = -ENOMEM;
6575 pr_err("fail to allocate mdp5 frc fsm structure\n");
6576 goto init_fail1;
6577 }
6578
6579 mdp5_data->mdata = dev_get_drvdata(mfd->pdev->dev.parent);
6580 if (!mdp5_data->mdata) {
6581 pr_err("unable to initialize overlay for fb%d\n", mfd->index);
6582 rc = -ENODEV;
6583 goto init_fail;
6584 }
6585
6586 mdp5_interface->on_fnc = mdss_mdp_overlay_on;
6587 mdp5_interface->off_fnc = mdss_mdp_overlay_off;
6588 mdp5_interface->release_fnc = __mdss_mdp_overlay_release_all;
6589 mdp5_interface->do_histogram = NULL;
6590 if (mdp5_data->mdata->ncursor_pipes)
6591 mdp5_interface->cursor_update = mdss_mdp_hw_cursor_pipe_update;
6592 else
6593 mdp5_interface->cursor_update = mdss_mdp_hw_cursor_update;
6594 mdp5_interface->async_position_update =
6595 mdss_mdp_async_position_update;
6596 mdp5_interface->dma_fnc = mdss_mdp_overlay_pan_display;
6597 mdp5_interface->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
6598 mdp5_interface->kickoff_fnc = mdss_mdp_overlay_kickoff;
6599 mdp5_interface->mode_switch = mdss_mode_switch;
6600 mdp5_interface->mode_switch_post = mdss_mode_switch_post;
6601 mdp5_interface->pre_commit_fnc = mdss_mdp_overlay_precommit;
6602 mdp5_interface->splash_init_fnc = mdss_mdp_splash_init;
6603 mdp5_interface->configure_panel = mdss_mdp_update_panel_info;
6604 mdp5_interface->input_event_handler = mdss_mdp_input_event_handler;
6605 mdp5_interface->signal_retire_fence = mdss_mdp_signal_retire_fence;
6606
6607 if (mfd->panel_info->type == WRITEBACK_PANEL) {
6608 mdp5_interface->atomic_validate =
6609 mdss_mdp_layer_atomic_validate_wfd;
6610 mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit_wfd;
6611 mdp5_interface->is_config_same = mdss_mdp_wfd_is_config_same;
6612 } else {
6613 mdp5_interface->atomic_validate =
6614 mdss_mdp_layer_atomic_validate;
6615 mdp5_interface->pre_commit = mdss_mdp_layer_pre_commit;
6616 }
6617
6618 INIT_LIST_HEAD(&mdp5_data->pipes_used);
6619 INIT_LIST_HEAD(&mdp5_data->pipes_cleanup);
6620 INIT_LIST_HEAD(&mdp5_data->pipes_destroy);
6621 INIT_LIST_HEAD(&mdp5_data->bufs_pool);
6622 INIT_LIST_HEAD(&mdp5_data->bufs_chunks);
6623 INIT_LIST_HEAD(&mdp5_data->bufs_used);
6624 INIT_LIST_HEAD(&mdp5_data->bufs_freelist);
6625 INIT_LIST_HEAD(&mdp5_data->rot_proc_list);
6626 mutex_init(&mdp5_data->list_lock);
6627 mutex_init(&mdp5_data->ov_lock);
6628 mutex_init(&mdp5_data->dfps_lock);
6629 mdp5_data->hw_refresh = true;
6630 mdp5_data->cursor_ndx[CURSOR_PIPE_LEFT] = MSMFB_NEW_REQUEST;
6631 mdp5_data->cursor_ndx[CURSOR_PIPE_RIGHT] = MSMFB_NEW_REQUEST;
6632 mdp5_data->allow_kickoff = false;
6633
6634 mfd->mdp.private1 = mdp5_data;
6635 mfd->wait_for_kickoff = true;
6636
6637 rc = mdss_mdp_overlay_fb_parse_dt(mfd);
6638 if (rc)
6639 return rc;
6640
6641 /*
6642 * disable BWC if primary panel is video mode on specific
6643 * chipsets to workaround HW problem.
6644 */
6645 if (mdss_has_quirk(mdp5_data->mdata, MDSS_QUIRK_BWCPANIC) &&
6646 mfd->panel_info->type == MIPI_VIDEO_PANEL && (mfd->index == 0))
6647 mdp5_data->mdata->has_bwc = false;
6648
6649 mfd->panel_orientation = mfd->panel_info->panel_orientation;
6650
6651 if ((mfd->panel_info->panel_orientation & MDP_FLIP_LR) &&
6652 (mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY))
6653 mdp5_data->mixer_swap = true;
6654
6655 rc = sysfs_create_group(&dev->kobj, &mdp_overlay_sysfs_group);
6656 if (rc) {
6657 pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
6658 goto init_fail;
6659 }
6660
6661 mdp5_data->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
6662 "vsync_event");
6663 if (!mdp5_data->vsync_event_sd) {
6664 pr_err("vsync_event sysfs lookup failed\n");
6665 rc = -ENODEV;
6666 goto init_fail;
6667 }
6668
6669 mdp5_data->lineptr_event_sd = sysfs_get_dirent(dev->kobj.sd,
6670 "lineptr_event");
6671 if (!mdp5_data->lineptr_event_sd) {
6672 pr_err("lineptr_event sysfs lookup failed\n");
6673 rc = -ENODEV;
6674 goto init_fail;
6675 }
6676
6677 mdp5_data->hist_event_sd = sysfs_get_dirent(dev->kobj.sd,
6678 "hist_event");
6679 if (!mdp5_data->hist_event_sd) {
6680 pr_err("hist_event sysfs lookup failed\n");
6681 rc = -ENODEV;
6682 goto init_fail;
6683 }
6684
6685 mdp5_data->bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
6686 "bl_event");
6687 if (!mdp5_data->bl_event_sd) {
6688 pr_err("bl_event sysfs lookup failed\n");
6689 rc = -ENODEV;
6690 goto init_fail;
6691 }
6692
6693 mdp5_data->ad_event_sd = sysfs_get_dirent(dev->kobj.sd,
6694 "ad_event");
6695 if (!mdp5_data->ad_event_sd) {
6696 pr_err("ad_event sysfs lookup failed\n");
6697 rc = -ENODEV;
6698 goto init_fail;
6699 }
6700
6701 mdp5_data->ad_bl_event_sd = sysfs_get_dirent(dev->kobj.sd,
6702 "ad_bl_event");
6703 if (!mdp5_data->ad_bl_event_sd) {
6704 pr_err("ad_bl_event sysfs lookup failed\n");
6705 rc = -ENODEV;
6706 goto init_fail;
6707 }
6708
6709 rc = sysfs_create_link_nowarn(&dev->kobj,
6710 &mdp5_data->mdata->pdev->dev.kobj, "mdp");
6711 if (rc)
6712 pr_warn("problem creating link to mdp sysfs\n");
6713
6714 rc = sysfs_create_link_nowarn(&dev->kobj,
6715 &mfd->pdev->dev.kobj, "mdss_fb");
6716 if (rc)
6717 pr_warn("problem creating link to mdss_fb sysfs\n");
6718
6719 if (mfd->panel_info->type == MIPI_VIDEO_PANEL ||
6720 mfd->panel_info->type == DTV_PANEL) {
6721 rc = sysfs_create_group(&dev->kobj,
6722 &dynamic_fps_fs_attrs_group);
6723 if (rc) {
6724 pr_err("Error dfps sysfs creation ret=%d\n", rc);
6725 goto init_fail;
6726 }
6727 }
6728
Krishna Manikandanb296a2b2018-03-21 17:16:31 +05306729 rc = __vsync_retire_setup(mfd);
6730 if (IS_ERR_VALUE((unsigned long)rc)) {
6731 pr_err("unable to create vsync timeline\n");
6732 goto init_fail;
Sachin Bhayareeeb88892018-01-02 16:36:01 +05306733 }
Krishna Manikandanb296a2b2018-03-21 17:16:31 +05306734
Sachin Bhayareeeb88892018-01-02 16:36:01 +05306735 mfd->mdp_sync_pt_data.async_wait_fences = true;
6736
6737 pm_runtime_set_suspended(&mfd->pdev->dev);
6738 pm_runtime_enable(&mfd->pdev->dev);
6739
6740 kobject_uevent(&dev->kobj, KOBJ_ADD);
6741 pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
6742
6743 mdss_irq = mdss_intr_line();
6744
6745 /* Adding event timer only for primary panel */
6746 if ((mfd->index == 0) && (mfd->panel_info->type != WRITEBACK_PANEL)) {
6747 mdp5_data->cpu_pm_hdl = add_event_timer(mdss_irq->irq,
6748 mdss_mdp_ctl_event_timer, (void *)mdp5_data);
6749 if (!mdp5_data->cpu_pm_hdl)
6750 pr_warn("%s: unable to add event timer\n", __func__);
6751 }
6752
6753 if (mfd->panel_info->cont_splash_enabled) {
6754 rc = mdss_mdp_overlay_handoff(mfd);
6755 if (rc) {
6756 /*
6757 * Even though handoff failed, it is not fatal.
6758 * MDP can continue, just that we would have a longer
6759 * delay in transitioning from splash screen to boot
6760 * animation
6761 */
6762 pr_warn("Overlay handoff failed for fb%d. rc=%d\n",
6763 mfd->index, rc);
6764 rc = 0;
6765 }
6766 }
6767 mdp5_data->dyn_pu_state = mfd->panel_info->partial_update_enabled;
6768
6769 if (mdss_mdp_pp_overlay_init(mfd))
6770 pr_warn("Failed to initialize pp overlay data.\n");
6771 return rc;
6772init_fail:
6773 kfree(mdp5_data->frc_fsm);
6774init_fail1:
6775 kfree(mdp5_data);
6776 return rc;
6777}
6778
6779static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
6780{
6781 int rc = 0;
6782 struct platform_device *pdev = mfd->pdev;
6783 struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
6784
6785 mdp5_mdata->mixer_swap = of_property_read_bool(pdev->dev.of_node,
6786 "qcom,mdss-mixer-swap");
6787 if (mdp5_mdata->mixer_swap) {
6788 pr_info("mixer swap is enabled for fb device=%s\n",
6789 pdev->name);
6790 }
6791
6792 return rc;
6793}
6794
/*
 * mdss_mdp_scaler_lut_init() - load the QSEED3 scaler LUTs from user space
 * @mdata:   global MDP data holding the scaler block state
 * @lut_tbl: user pointers and sizes for the DIR, CIR and SEP tables
 *
 * Validates the three table sizes, lazily allocates device-managed
 * buffers for them, and copies the tables in, all under the scaler
 * lock. On any failure after allocation the fail_free_* labels cascade
 * so every buffer is released and the table is marked invalid.
 *
 * Return: 0 on success, -EFAULT with no scaler block, -EINVAL on bad
 * sizes or a failed copy_from_user, -ENOMEM on allocation failure.
 */
static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
		struct mdp_scale_luts_info *lut_tbl)
{
	struct mdss_mdp_qseed3_lut_tbl *qseed3_lut_tbl;
	int ret = 0;

	if (!mdata->scaler_off)
		return -EFAULT;

	mutex_lock(&mdata->scaler_off->scaler_lock);

	qseed3_lut_tbl = &mdata->scaler_off->lut_tbl;
	/* all three sizes must match the hardware table dimensions exactly */
	if ((lut_tbl->dir_lut_size !=
		DIR_LUT_IDX * DIR_LUT_COEFFS * sizeof(uint32_t)) ||
		(lut_tbl->cir_lut_size !=
		 CIR_LUT_IDX * CIR_LUT_COEFFS * sizeof(uint32_t)) ||
		(lut_tbl->sep_lut_size !=
		 SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t))) {
		mutex_unlock(&mdata->scaler_off->scaler_lock);
		return -EINVAL;
	}

	/* buffers persist across calls; allocate only on first use */
	if (!qseed3_lut_tbl->dir_lut) {
		qseed3_lut_tbl->dir_lut = devm_kzalloc(&mdata->pdev->dev,
				lut_tbl->dir_lut_size,
				GFP_KERNEL);
		if (!qseed3_lut_tbl->dir_lut) {
			ret = -ENOMEM;
			goto err;
		}
	}

	if (!qseed3_lut_tbl->cir_lut) {
		qseed3_lut_tbl->cir_lut = devm_kzalloc(&mdata->pdev->dev,
				lut_tbl->cir_lut_size,
				GFP_KERNEL);
		if (!qseed3_lut_tbl->cir_lut) {
			ret = -ENOMEM;
			goto fail_free_dir_lut;
		}
	}

	if (!qseed3_lut_tbl->sep_lut) {
		qseed3_lut_tbl->sep_lut = devm_kzalloc(&mdata->pdev->dev,
				lut_tbl->sep_lut_size,
				GFP_KERNEL);
		if (!qseed3_lut_tbl->sep_lut) {
			ret = -ENOMEM;
			goto fail_free_cir_lut;
		}
	}

	/* Invalidate before updating */
	qseed3_lut_tbl->valid = false;

	if (copy_from_user(qseed3_lut_tbl->dir_lut,
				(void *)(unsigned long)lut_tbl->dir_lut,
				lut_tbl->dir_lut_size)) {
		ret = -EINVAL;
		goto fail_free_sep_lut;
	}

	if (copy_from_user(qseed3_lut_tbl->cir_lut,
				(void *)(unsigned long)lut_tbl->cir_lut,
				lut_tbl->cir_lut_size)) {
		ret = -EINVAL;
		goto fail_free_sep_lut;
	}

	if (copy_from_user(qseed3_lut_tbl->sep_lut,
				(void *)(unsigned long)lut_tbl->sep_lut,
				lut_tbl->sep_lut_size)) {
		ret = -EINVAL;
		goto fail_free_sep_lut;
	}

	qseed3_lut_tbl->valid = true;
	mutex_unlock(&mdata->scaler_off->scaler_lock);

	return ret;

	/* labels fall through: each frees its buffer then the ones below */
fail_free_sep_lut:
	devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->sep_lut);
fail_free_cir_lut:
	devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->cir_lut);
fail_free_dir_lut:
	devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->dir_lut);
err:
	qseed3_lut_tbl->dir_lut = NULL;
	qseed3_lut_tbl->cir_lut = NULL;
	qseed3_lut_tbl->sep_lut = NULL;
	qseed3_lut_tbl->valid = false;
	mutex_unlock(&mdata->scaler_off->scaler_lock);

	return ret;
}
6891
6892static int mdss_mdp_set_cfg(struct msm_fb_data_type *mfd,
6893 struct mdp_set_cfg *cfg)
6894{
6895 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
6896 int ret = -EINVAL;
6897 struct mdp_scale_luts_info luts_info;
6898
6899 switch (cfg->flags) {
6900 case MDP_QSEED3_LUT_CFG:
6901 if (cfg->len != sizeof(luts_info)) {
6902 pr_err("invalid length %d expected %zd\n", cfg->len,
6903 sizeof(luts_info));
6904 ret = -EINVAL;
6905 break;
6906 }
6907 ret = copy_from_user(&luts_info,
6908 (void *)(unsigned long)cfg->payload, cfg->len);
6909 if (ret) {
6910 pr_err("qseed3 lut copy failed ret %d\n", ret);
6911 ret = -EFAULT;
6912 break;
6913 }
6914 ret = mdss_mdp_scaler_lut_init(mdata, &luts_info);
6915 break;
6916 default:
6917 break;
6918 }
6919 return ret;
6920}