blob: b1c80413d795ee5cc49275c69c7842434a7e4845 [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/errno.h>
17#include <linux/kernel.h>
18#include <linux/major.h>
19#include <linux/pm_runtime.h>
20#include <linux/uaccess.h>
21#include <linux/delay.h>
22#include <linux/msm_mdp.h>
23#include <linux/memblock.h>
Sachin Bhayareeeb88892018-01-02 16:36:01 +053024#include <linux/file.h>
25
26#include <soc/qcom/event_timer.h>
27#include "mdss.h"
28#include "mdss_debug.h"
29#include "mdss_fb.h"
30#include "mdss_mdp.h"
31#include "mdss_mdp_wfd.h"
Sachin Bhayare2b6d0042018-01-13 19:38:21 +053032#include "mdss_sync.h"
Sachin Bhayareeeb88892018-01-02 16:36:01 +053033
34#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
35 (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
36
37#define SCALER_ENABLED \
38 (MDP_LAYER_ENABLE_PIXEL_EXT | MDP_LAYER_ENABLE_QSEED3_SCALE)
39
/*
 * Fence types requested from __create_fence(): the release fence is
 * created on sync_pt_data->timeline ("fb%d_release"), the retire fence
 * on the retire/vsync timeline ("fb%d_retire").
 */
enum {
	MDSS_MDP_RELEASE_FENCE = 0,
	MDSS_MDP_RETIRE_FENCE,
};
44
/*
 * Which queue the pipe serving a layer was taken from; used by
 * __assign_pipe_for_layer() (new allocation, reused from the in-use
 * list, or recovered from the destroy/cleanup lists).
 */
enum layer_pipe_q {
	LAYER_USES_NEW_PIPE_Q = 0,
	LAYER_USES_USED_PIPE_Q,
	LAYER_USES_DESTROY_PIPE_Q,
};
50
/*
 * Bitmask of which half(s) of a split layer-mixer a given z-order
 * slot is occupied on. BOTH == LEFT | RIGHT.
 */
enum layer_zorder_used {
	LAYER_ZORDER_NONE = 0,
	LAYER_ZORDER_LEFT = 1,
	LAYER_ZORDER_RIGHT = 2,
	LAYER_ZORDER_BOTH = 3,
};
57
/*
 * Per-layer validation context: the userspace input layer plus the
 * multirect parameters (rect number/mode) resolved for the pipe that
 * will serve it.
 */
struct mdss_mdp_validate_info_t {
	struct mdp_input_layer *layer;
	struct mdss_mdp_pipe_multirect_params multirect;
};
62
63/*
64 * __layer_needs_src_split() - check needs source split configuration
65 * @layer: input layer
66 *
67 * return true if the layer should be used as source split
68 */
69static bool __layer_needs_src_split(struct mdp_input_layer *layer)
70{
71 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
72
73 return (layer->flags & MDP_LAYER_ASYNC) ||
74 mdss_has_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
75}
76
77static int __async_update_position_check(struct msm_fb_data_type *mfd,
78 struct mdss_mdp_pipe *pipe, struct mdp_point *src,
79 struct mdp_point *dst)
80{
81 struct fb_var_screeninfo *var = &mfd->fbi->var;
82 u32 xres = var->xres;
83 u32 yres = var->yres;
84
85 if (!pipe->async_update
86 || CHECK_LAYER_BOUNDS(src->x, pipe->src.w, pipe->img_width)
87 || CHECK_LAYER_BOUNDS(src->y, pipe->src.h, pipe->img_height)
88 || CHECK_LAYER_BOUNDS(dst->x, pipe->dst.w, xres)
89 || CHECK_LAYER_BOUNDS(dst->y, pipe->dst.h, yres)) {
90 pr_err("invalid configs: async_update=%d, src:{%d,%d}, dst:{%d,%d}\n",
91 pipe->async_update, src->x, src->y, dst->x, dst->y);
92 pr_err("pipe:- src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
93 pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
94 pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
95 return -EINVAL;
96 }
97 return 0;
98}
99
100static int __cursor_layer_check(struct msm_fb_data_type *mfd,
101 struct mdp_input_layer *layer)
102{
103 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
104
105 if ((layer->z_order != HW_CURSOR_STAGE(mdata))
106 || layer->src_rect.w > mdata->max_cursor_size
107 || layer->src_rect.h > mdata->max_cursor_size
108 || layer->src_rect.w != layer->dst_rect.w
109 || layer->src_rect.h != layer->dst_rect.h
110 || !mdata->ncursor_pipes) {
111 pr_err("Incorrect cursor configs for pipe:%d, cursor_pipes:%d, z_order:%d\n",
112 layer->pipe_ndx, mdata->ncursor_pipes,
113 layer->z_order);
114 pr_err("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
115 layer->src_rect.x, layer->src_rect.y,
116 layer->src_rect.w, layer->src_rect.h,
117 layer->dst_rect.x, layer->dst_rect.y,
118 layer->dst_rect.w, layer->dst_rect.h);
119 return -EINVAL;
120 }
121
122 return 0;
123}
124
/*
 * __layer_xres_check() - validate a layer's horizontal destination bounds.
 * @mfd:   framebuffer device the layer targets
 * @layer: input layer to validate
 *
 * Computes the horizontal extent available to the layer from the layer
 * mixer(s) it lands on, then bounds-checks dst_rect against it.
 *
 * Side effect: when the target has no source split and the layer lies
 * entirely on the right mixer, dst_rect.x is rebased to be relative to
 * the right mixer's origin.
 *
 * Return: 0 on success, -EPERM when the required mixer does not exist,
 * -EINVAL when the destination rectangle exceeds the computed width.
 */
static int __layer_xres_check(struct msm_fb_data_type *mfd,
	struct mdp_input_layer *layer)
{
	u32 xres = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);

	if (layer->dst_rect.x >= left_lm_w) {
		/* layer starts on the right mixer */
		if (mdata->has_src_split)
			xres = left_lm_w;
		else
			layer->dst_rect.x -= left_lm_w;

		if (ctl->mixer_right) {
			xres += ctl->mixer_right->width;
		} else {
			pr_err("ov cannot be placed on right mixer\n");
			return -EPERM;
		}
	} else {
		if (ctl->mixer_left) {
			xres = ctl->mixer_left->width;
		} else {
			pr_err("ov cannot be placed on left mixer\n");
			return -EPERM;
		}

		/* with src split a single layer may span both mixers */
		if (mdata->has_src_split && ctl->mixer_right)
			xres += ctl->mixer_right->width;
	}

	if (CHECK_LAYER_BOUNDS(layer->dst_rect.x, layer->dst_rect.w, xres)) {
		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
			layer->dst_rect.x, layer->dst_rect.w, xres);
		return -EINVAL;
	}

	return 0;
}
165
/*
 * __layer_param_check() - validate one input layer against HW limits.
 * @mfd:      framebuffer device the layer targets
 * @layer:    input layer from userspace
 * @fmt:      resolved format parameters for layer->buffer.format
 * @rect_num: multirect rectangle this layer uses on the pipe
 *
 * Checks, in order: secure-session permission, z-order range, pipe
 * existence, source rect vs. buffer bounds, minimum destination size,
 * decimation constraints, vertical destination bounds, scaling ratio
 * limits, BWC restrictions, deinterlace alignment and YUV even-pixel
 * alignment.
 *
 * Return: 0 on success; -EPERM, -EINVAL, -EOVERFLOW or -E2BIG
 * depending on the first failed check (see individual checks below).
 */
static int __layer_param_check(struct msm_fb_data_type *mfd,
	struct mdp_input_layer *layer, struct mdss_mdp_format_params *fmt,
	enum mdss_mdp_pipe_rect rect_num)
{
	u32 yres;
	u32 min_src_size, min_dst_size = 1;
	int content_secure;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	u32 src_w, src_h, dst_w, dst_h, width, height;

	if (!ctl) {
		pr_err("ctl is null\n");
		return -EINVAL;
	}

	/* prefer the mixer height; fall back to fb var info */
	if (ctl->mixer_left) {
		yres = ctl->mixer_left->height;
	} else {
		pr_debug("Using fb var screen infor for height\n");
		yres = mfd->fbi->var.yres;
	}

	/* secure content may not be staged on a non-secure writeback ctl */
	content_secure = (layer->flags & MDP_LAYER_SECURE_SESSION);
	if (!ctl->is_secure && content_secure &&
			 (mfd->panel.type == WRITEBACK_PANEL)) {
		pr_debug("return due to security concerns\n");
		return -EPERM;
	}
	/* YUV sources need at least 2x2 due to chroma subsampling */
	min_src_size = fmt->is_yuv ? 2 : 1;

	if (layer->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
		pr_err("zorder %d out of range\n", layer->z_order);
		return -EINVAL;
	}

	if (!mdss_mdp_pipe_search(mdata, layer->pipe_ndx, rect_num)) {
		pr_err("layer pipe is invalid: 0x%x rect:%d\n",
				layer->pipe_ndx, rect_num);
		return -EINVAL;
	}

	/*
	 * Deinterlaced sources are fetched as two half-height fields,
	 * which doubles the effective width and halves the height used
	 * for the source-rect bounds check below.
	 */
	width = layer->buffer.width;
	height = layer->buffer.height;
	if (layer->flags & MDP_LAYER_DEINTERLACE) {
		width *= 2;
		height /= 2;
	}

	if (layer->buffer.width > MAX_IMG_WIDTH ||
	    layer->buffer.height > MAX_IMG_HEIGHT ||
	    layer->src_rect.w < min_src_size ||
	    layer->src_rect.h < min_src_size ||
	    CHECK_LAYER_BOUNDS(layer->src_rect.x, layer->src_rect.w, width) ||
	    CHECK_LAYER_BOUNDS(layer->src_rect.y, layer->src_rect.h, height)) {
		pr_err("invalid source image img flag=%d wh=%dx%d rect=%d,%d,%d,%d\n",
		    layer->flags, width, height,
		    layer->src_rect.x, layer->src_rect.y,
		    layer->src_rect.w, layer->src_rect.h);
		return -EINVAL;
	}

	if (layer->dst_rect.w < min_dst_size ||
	    layer->dst_rect.h < min_dst_size) {
		pr_err("invalid destination resolution (%dx%d)",
				layer->dst_rect.w, layer->dst_rect.h);
		return -EINVAL;
	}

	/* decimation: HW support, max factor, no BWC, linear fetch only */
	if (layer->horz_deci || layer->vert_deci) {
		if (!mdata->has_decimation) {
			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
			return -EINVAL;
		} else if ((layer->horz_deci > MAX_DECIMATION) ||
				(layer->vert_deci > MAX_DECIMATION)) {
			pr_err("Invalid decimation factors horz=%d vert=%d\n",
					layer->horz_deci, layer->vert_deci);
			return -EINVAL;
		} else if (layer->flags & MDP_LAYER_BWC) {
			pr_err("Decimation can't be enabled with BWC\n");
			return -EINVAL;
		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
			pr_err("Decimation can't be enabled with MacroTile format\n");
			return -EINVAL;
		}
	}

	if (CHECK_LAYER_BOUNDS(layer->dst_rect.y, layer->dst_rect.h, yres)) {
		pr_err("invalid vertical destination: y=%d, h=%d, yres=%d\n",
			layer->dst_rect.y, layer->dst_rect.h, yres);
		return -EOVERFLOW;
	}

	dst_w = layer->dst_rect.w;
	dst_h = layer->dst_rect.h;

	/* scaling ratios are evaluated on the post-decimation source size */
	src_w = layer->src_rect.w >> layer->horz_deci;
	src_h = layer->src_rect.h >> layer->vert_deci;

	if (src_w > mdata->max_mixer_width) {
		pr_err("invalid source width=%d HDec=%d\n",
			layer->src_rect.w, layer->horz_deci);
		return -EINVAL;
	}

	if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
		pr_err("too much upscaling Width %d->%d\n",
			layer->src_rect.w, layer->dst_rect.w);
		return -E2BIG;
	}

	if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
		pr_err("too much upscaling. Height %d->%d\n",
			layer->src_rect.h, layer->dst_rect.h);
		return -E2BIG;
	}

	if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
		pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
			src_w, layer->dst_rect.w, layer->horz_deci);
		return -E2BIG;
	}

	if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
		pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
			src_h, layer->dst_rect.h, layer->vert_deci);
		return -E2BIG;
	}

	/* BWC requires full-buffer fetch and excludes decimation */
	if (layer->flags & MDP_LAYER_BWC) {
		if ((layer->buffer.width != layer->src_rect.w) ||
			(layer->buffer.height != layer->src_rect.h)) {
			pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
				layer->buffer.width, layer->buffer.height,
				layer->src_rect.w, layer->src_rect.h);
			return -EINVAL;
		}

		if (layer->horz_deci || layer->vert_deci) {
			pr_err("Can't enable BWC decode && decimate\n");
			return -EINVAL;
		}
	}

	/* interlaced fetch needs 4-line alignment along the field axis */
	if ((layer->flags & MDP_LAYER_DEINTERLACE) &&
		!(layer->flags & SCALER_ENABLED)) {
		if (layer->flags & MDP_SOURCE_ROTATED_90) {
			if ((layer->src_rect.w % 4) != 0) {
				pr_err("interlaced rect not h/4\n");
				return -EINVAL;
			}
		} else if ((layer->src_rect.h % 4) != 0) {
			pr_err("interlaced rect not h/4\n");
			return -EINVAL;
		}
	}

	/* YUV formats require even source coordinates and dimensions */
	if (fmt->is_yuv) {
		if ((layer->src_rect.x & 0x1) || (layer->src_rect.y & 0x1) ||
		    (layer->src_rect.w & 0x1) || (layer->src_rect.h & 0x1)) {
			pr_err("invalid odd src resolution or coordinates\n");
			return -EINVAL;
		}
	}

	return 0;
}
333
334/* compare all reconfiguration parameter validation in this API */
335static int __validate_layer_reconfig(struct mdp_input_layer *layer,
336 struct mdss_mdp_pipe *pipe)
337{
338 int status = 0;
339 struct mdss_mdp_format_params *src_fmt;
340
341 /*
342 * csc registers are not double buffered. It is not permitted
343 * to change them on staged pipe with YUV layer.
344 */
345 if (pipe->csc_coeff_set != layer->color_space) {
346 src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
347 if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
348 status = -EPERM;
349 pr_err("csc change is not permitted on used pipe\n");
350 }
351 }
352
353 return status;
354}
355
356static int __validate_single_layer(struct msm_fb_data_type *mfd,
357 struct mdss_mdp_validate_info_t *layer_info, u32 mixer_mux)
358{
359 u32 bwc_enabled;
360 int ret;
361 bool is_vig_needed = false;
362 struct mdss_mdp_format_params *fmt;
363 struct mdss_mdp_mixer *mixer = NULL;
364 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
365 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
366 struct mdp_input_layer *layer = layer_info->layer;
367 int ptype = get_pipe_type_from_ndx(layer->pipe_ndx);
368
369 if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
370 pr_err("Invalid pipe ndx=%d\n", layer->pipe_ndx);
371 return -EINVAL;
372 }
373
374 if ((layer->dst_rect.w > mdata->max_mixer_width) ||
375 (layer->dst_rect.h > MAX_DST_H)) {
376 pr_err("exceeded max mixer supported resolution %dx%d\n",
377 layer->dst_rect.w, layer->dst_rect.h);
378 ret = -EINVAL;
379 goto exit_fail;
380 }
381
382 pr_debug("ctl=%u mux=%d z_order=%d flags=0x%x dst_x:%d\n",
383 mdp5_data->ctl->num, mixer_mux, layer->z_order,
384 layer->flags, layer->dst_rect.x);
385
386 fmt = mdss_mdp_get_format_params(layer->buffer.format);
387 if (!fmt) {
388 pr_err("invalid layer format %d\n", layer->buffer.format);
389 ret = -EINVAL;
390 goto exit_fail;
391 }
392
393 bwc_enabled = layer->flags & MDP_LAYER_BWC;
394
395 if (bwc_enabled) {
396 if (!mdp5_data->mdata->has_bwc) {
397 pr_err("layer uses bwc format but MDP does not support it\n");
398 ret = -EINVAL;
399 goto exit_fail;
400 }
401
402 layer->buffer.format =
403 mdss_mdp_get_rotator_dst_format(
404 layer->buffer.format, false, bwc_enabled);
405 fmt = mdss_mdp_get_format_params(layer->buffer.format);
406 if (!fmt) {
407 pr_err("invalid layer format %d\n",
408 layer->buffer.format);
409 ret = -EINVAL;
410 goto exit_fail;
411 }
412 }
413
414 if (ptype == MDSS_MDP_PIPE_TYPE_CURSOR) {
415 ret = __cursor_layer_check(mfd, layer);
416 if (ret)
417 goto exit_fail;
418 }
419
420 ret = __layer_xres_check(mfd, layer);
421 if (ret)
422 goto exit_fail;
423
424 ret = __layer_param_check(mfd, layer, fmt, layer_info->multirect.num);
425 if (ret)
426 goto exit_fail;
427
428 mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
429 if (!mixer) {
430 pr_err("unable to get %s mixer\n",
431 (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
432 "right" : "left");
433 ret = -EPERM;
434 goto exit_fail;
435 }
436
437 if (fmt->is_yuv || (mdata->has_non_scalar_rgb &&
438 ((layer->src_rect.w != layer->dst_rect.w) ||
439 (layer->src_rect.h != layer->dst_rect.h))))
440 is_vig_needed = true;
441
442 if (is_vig_needed && ptype != MDSS_MDP_PIPE_TYPE_VIG) {
443 pr_err("pipe is non-scalar ndx=%x\n", layer->pipe_ndx);
444 ret = -EINVAL;
445 goto exit_fail;
446 }
447
448 if (((ptype == MDSS_MDP_PIPE_TYPE_DMA) ||
449 (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) &&
450 (layer->dst_rect.h != layer->src_rect.h ||
451 layer->dst_rect.w != layer->src_rect.w)) {
452 pr_err("no scaling supported on dma/cursor pipe, pipe num:%d\n",
453 layer->pipe_ndx);
454 return -EINVAL;
455 }
456
457exit_fail:
458 return ret;
459}
460
/*
 * __configure_pipe_params() - program a pipe's SW state from an input layer.
 * @mfd:             framebuffer device the layer targets
 * @vinfo:           validation context (layer + multirect params)
 * @pipe:            pipe being (re)configured for this layer
 * @left_blend_pipe: pipe already staged at the same z-order on the left
 *                   half, or NULL (src-split right-blend pairing)
 * @is_single_layer: true when this is the only layer on the mixer
 *                   (feeds the perf calculation flag)
 * @mixer_mux:       which layer mixer (left/right) the layer maps to
 *
 * Copies geometry, flags, blending, scaling and post-processing settings
 * from the layer into the pipe, resolves source-split staging across two
 * mixers, and finishes with perf tuning and SMP reservation.
 *
 * Side effects: may flip bits in layer->flags for panel orientation, and
 * may unstage the pipe from its current mixer(s).
 *
 * Return: 0 on success, negative errno on any configuration failure.
 */
static int __configure_pipe_params(struct msm_fb_data_type *mfd,
	struct mdss_mdp_validate_info_t *vinfo, struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer,
	u32 mixer_mux)
{
	int ret = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	u32 flags;
	bool is_right_blend = false;

	struct mdss_mdp_mixer *mixer = NULL;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdp_input_layer *layer = vinfo->layer;

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
	pipe->src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
	if (!pipe->src_fmt || !mixer) {
		pr_err("invalid layer format:%d or mixer:%pK\n",
				layer->buffer.format, pipe->mixer_left);
		ret = -EINVAL;
		goto end;
	}

	pipe->comp_ratio = layer->buffer.comp_ratio;

	/* fold panel orientation into the layer's flip flags */
	if (mfd->panel_orientation)
		layer->flags ^= mfd->panel_orientation;

	pipe->mixer_left = mixer;
	pipe->mfd = mfd;
	pipe->play_cnt = 0;
	pipe->flags = 0;

	/* translate MDP_LAYER_* flags into pipe MDP_* flags */
	if (layer->flags & MDP_LAYER_FLIP_LR)
		pipe->flags = MDP_FLIP_LR;
	if (layer->flags & MDP_LAYER_FLIP_UD)
		pipe->flags |= MDP_FLIP_UD;
	if (layer->flags & MDP_LAYER_SECURE_SESSION)
		pipe->flags |= MDP_SECURE_OVERLAY_SESSION;
	if (layer->flags & MDP_LAYER_SECURE_DISPLAY_SESSION)
		pipe->flags |= MDP_SECURE_DISPLAY_OVERLAY_SESSION;
	if (layer->flags & MDP_LAYER_SOLID_FILL)
		pipe->flags |= MDP_SOLID_FILL;
	if (layer->flags & MDP_LAYER_DEINTERLACE)
		pipe->flags |= MDP_DEINTERLACE;
	if (layer->flags & MDP_LAYER_BWC)
		pipe->flags |= MDP_BWC_EN;
	if (layer->flags & MDP_LAYER_PP)
		pipe->flags |= MDP_OVERLAY_PP_CFG_EN;

	pipe->is_fg = layer->flags & MDP_LAYER_FORGROUND;
	/* image dimensions are masked to the HW field width */
	pipe->img_width = layer->buffer.width & 0x3fff;
	pipe->img_height = layer->buffer.height & 0x3fff;
	pipe->src.x = layer->src_rect.x;
	pipe->src.y = layer->src_rect.y;
	pipe->src.w = layer->src_rect.w;
	pipe->src.h = layer->src_rect.h;
	pipe->dst.x = layer->dst_rect.x;
	pipe->dst.y = layer->dst_rect.y;
	pipe->dst.w = layer->dst_rect.w;
	pipe->dst.h = layer->dst_rect.h;
	pipe->horz_deci = layer->horz_deci;
	pipe->vert_deci = layer->vert_deci;
	pipe->bg_color = layer->bg_color;
	pipe->alpha = layer->alpha;
	pipe->transp = layer->transp_mask;
	pipe->blend_op = layer->blend_op;
	pipe->is_handed_off = false;
	pipe->async_update = (layer->flags & MDP_LAYER_ASYNC) ? true : false;
	pipe->csc_coeff_set = layer->color_space;

	/* destination is offset by the ctl's border, if any */
	if (mixer->ctl) {
		pipe->dst.x += mixer->ctl->border_x_off;
		pipe->dst.y += mixer->ctl->border_y_off;
		pr_debug("border{%d,%d}\n", mixer->ctl->border_x_off,
				mixer->ctl->border_y_off);
	}
	pr_debug("src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n",
		pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
		pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);

	if (layer->flags & SCALER_ENABLED)
		memcpy(&pipe->scaler, layer->scale,
				sizeof(struct mdp_scale_data_v2));

	pipe->scaler.enable = (layer->flags & SCALER_ENABLED);

	flags = pipe->flags;
	if (is_single_layer)
		flags |= PERF_CALC_PIPE_SINGLE_LAYER;

	/*
	 * async update is allowed only in video mode panels with single LM
	 * or dual LM with src_split enabled.
	 */
	if (pipe->async_update && ((is_split_lm(mfd) && !mdata->has_src_split)
			|| (!mdp5_data->ctl->is_video_mode))) {
		pr_err("async update allowed only in video mode panel with src_split\n");
		ret = -EINVAL;
		goto end;
	}

	/*
	 * unstage the pipe if it's current z_order does not match with new
	 * z_order because client may only call the validate.
	 */
	if (pipe->mixer_stage != layer->z_order)
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);

	/*
	 * check if overlay span across two mixers and if source split is
	 * available. If yes, enable src_split_req flag so that during mixer
	 * staging, same pipe will be stagged on both layer mixers.
	 */
	if (mdata->has_src_split) {
		is_right_blend = pipe->is_right_blend;
		if (left_blend_pipe) {
			/* right-blend pipe must have higher HW priority */
			if (pipe->priority <= left_blend_pipe->priority) {
				pr_err("priority limitation. left:%d right%d\n",
					left_blend_pipe->priority,
					pipe->priority);
				ret = -EPERM;
				goto end;
			} else {
				pr_debug("pipe%d is a right_pipe\n", pipe->num);
				is_right_blend = true;
			}
		} else if (pipe->is_right_blend) {
			/*
			 * pipe used to be right blend. So need to update mixer
			 * configuration to remove it as a right blend.
			 */
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
			is_right_blend = false;
		}

		if (is_split_lm(mfd) && __layer_needs_src_split(layer)) {
			pipe->src_split_req = true;
		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
		    ((layer->dst_rect.x + layer->dst_rect.w) > mixer->width)) {
			/* left-mixer layer spilling over into the right LM */
			if (layer->dst_rect.x >= mixer->width) {
				pr_err("%pS: err dst_x can't lie in right half",
					__builtin_return_address(0));
				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
					layer->flags, layer->dst_rect.x,
					layer->dst_rect.w, mixer->width);
				ret = -EINVAL;
				goto end;
			} else {
				pipe->src_split_req = true;
			}
		} else {
			/* no longer spanning: drop any stale right staging */
			if (pipe->src_split_req) {
				mdss_mdp_mixer_pipe_unstage(pipe,
					pipe->mixer_right);
				pipe->mixer_right = NULL;
			}
			pipe->src_split_req = false;
		}
		pipe->is_right_blend = is_right_blend;
	}

	pipe->multirect.mode = vinfo->multirect.mode;
	pipe->mixer_stage = layer->z_order;

	/* mirror destination for flipped panel orientations */
	if (mfd->panel_orientation & MDP_FLIP_LR)
		pipe->dst.x = pipe->mixer_left->width - pipe->dst.x -
			pipe->dst.w;
	if (mfd->panel_orientation & MDP_FLIP_UD)
		pipe->dst.y = pipe->mixer_left->height - pipe->dst.y -
			pipe->dst.h;

	memcpy(&pipe->layer, layer, sizeof(struct mdp_input_layer));

	mdss_mdp_overlay_set_chroma_sample(pipe);

	/* pick a default blend op based on the format's alpha support */
	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
		pipe->blend_op = pipe->src_fmt->alpha_enable ?
			BLEND_OP_PREMULTIPLIED : BLEND_OP_OPAQUE;

	if (pipe->src_fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
			!pipe->scaler.enable) {
		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;

		if (pipe->dst.x >= left_lm_w)
			pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
	} else {
		pipe->overfetch_disable = 0;
	}

	/*
	 * When scaling is enabled src crop and image
	 * width and height is modified by user
	 */
	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
			pipe->src.x &= ~1;
			pipe->src.w /= 2;
			pipe->img_width /= 2;
		} else {
			pipe->src.h /= 2;
			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
			pipe->src.y &= ~1;
		}
	}

	ret = mdss_mdp_overlay_setup_scaling(pipe);
	if (ret) {
		pr_err("scaling setup failed %d\n", ret);
		goto end;
	}

	if (layer->flags & MDP_LAYER_PP) {
		memcpy(&pipe->pp_cfg, layer->pp_info,
				sizeof(struct mdp_overlay_pp_params));
		ret = mdss_mdp_pp_sspp_config(pipe);
		if (ret) {
			pr_err("pp setup failed %d\n", ret);
			goto end;
		}
	}

	/* cursor pipes skip perf tuning and SMP reservation */
	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
		goto end;

	ret = mdp_pipe_tune_perf(pipe, flags);
	if (ret) {
		pr_err("unable to satisfy performance. ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_smp_reserve(pipe);
	if (ret) {
		pr_err("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
				pipe->num, ret);
		goto end;
	}
end:
	return ret;
}
705
/*
 * __create_fence() - create a release or retire fence for a commit.
 * @mfd:          framebuffer device
 * @sync_pt_data: per-fb sync point bookkeeping (timelines, counters)
 * @fence_type:   MDSS_MDP_RELEASE_FENCE or MDSS_MDP_RETIRE_FENCE
 * @fence_fd:     out: fd installed for the new fence (negative on failure)
 * @value:        timeline value at which the fence signals; overridden
 *                for retire fences on command-mode panels (vsync timeline)
 *
 * Retire fences on MIPI command panels signal off the vsync timeline
 * (one vsync per retire); all other fences signal off the sync_pt_data
 * timelines at @value.
 *
 * Return: fence pointer on success, ERR_PTR on precondition failure,
 * or NULL if fence creation/fd installation failed.
 */
static struct mdss_fence *__create_fence(struct msm_fb_data_type *mfd,
	struct msm_sync_pt_data *sync_pt_data, u32 fence_type,
	int *fence_fd, int value)
{
	struct mdss_overlay_private *mdp5_data;
	struct mdss_mdp_ctl *ctl;
	struct mdss_fence *sync_fence = NULL;
	char fence_name[32];

	mdp5_data = mfd_to_mdp5_data(mfd);

	ctl = mdp5_data->ctl;
	/* no vsync handler yet means the first update hasn't happened */
	if (!ctl->ops.add_vsync_handler) {
		pr_err("fb%d vsync pending first update\n", mfd->index);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (!mdss_mdp_ctl_is_power_on(ctl)) {
		pr_err("fb%d ctl power on failed\n", mfd->index);
		return ERR_PTR(-EPERM);
	}

	if (fence_type == MDSS_MDP_RETIRE_FENCE)
		snprintf(fence_name, sizeof(fence_name), "fb%d_retire",
			mfd->index);
	else
		snprintf(fence_name, sizeof(fence_name), "fb%d_release",
			mfd->index);

	if ((fence_type == MDSS_MDP_RETIRE_FENCE) &&
		(mfd->panel.type == MIPI_CMD_PANEL)) {
		/* command mode: retire signals on the vsync timeline */
		if (mdp5_data->vsync_timeline) {
			value = 1 + mdp5_data->retire_cnt++;
			sync_fence = mdss_fb_sync_get_fence(
				mdp5_data->vsync_timeline, fence_name,
				value);
		} else {
			return ERR_PTR(-EPERM);
		}
	} else {
		if (fence_type == MDSS_MDP_RETIRE_FENCE)
			sync_fence = mdss_fb_sync_get_fence(
				sync_pt_data->timeline_retire,
				fence_name, value);
		else
			sync_fence = mdss_fb_sync_get_fence(
				sync_pt_data->timeline,
				fence_name, value);

	}

	if (IS_ERR_OR_NULL(sync_fence)) {
		pr_err("%s: unable to retrieve release fence\n", fence_name);
		goto end;
	}

	/* get fence fd */
	*fence_fd = mdss_get_sync_fence_fd(sync_fence);
	if (*fence_fd < 0) {
		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
			fence_name, *fence_fd);
		mdss_put_sync_fence(sync_fence);
		/* NOTE: NULL (not ERR_PTR) is returned on fd failure */
		sync_fence = NULL;
		goto end;
	}
	pr_debug("%s:val=%d\n", mdss_get_sync_fence_name(sync_fence), value);

end:
	return sync_fence;
}
776
777/*
778 * __handle_buffer_fences() - copy sync fences and return release/retire
779 * fence to caller.
780 *
781 * This function copies all input sync fences to acquire fence array and
782 * returns release/retire fences to caller. It acts like buff_sync ioctl.
783 */
784static int __handle_buffer_fences(struct msm_fb_data_type *mfd,
785 struct mdp_layer_commit_v1 *commit, struct mdp_input_layer *layer_list)
786{
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530787 struct mdss_fence *fence, *release_fence, *retire_fence;
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530788 struct msm_sync_pt_data *sync_pt_data = NULL;
789 struct mdp_input_layer *layer;
790 int value;
791
792 u32 acq_fen_count, i, ret = 0;
793 u32 layer_count = commit->input_layer_cnt;
794
795 sync_pt_data = &mfd->mdp_sync_pt_data;
796 if (!sync_pt_data) {
797 pr_err("sync point data are NULL\n");
798 return -EINVAL;
799 }
800
801 i = mdss_fb_wait_for_fence(sync_pt_data);
802 if (i > 0)
803 pr_warn("%s: waited on %d active fences\n",
804 sync_pt_data->fence_name, i);
805
806 mutex_lock(&sync_pt_data->sync_mutex);
807 for (i = 0, acq_fen_count = 0; i < layer_count; i++) {
808 layer = &layer_list[i];
809
810 if (layer->buffer.fence < 0)
811 continue;
812
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530813 fence = mdss_get_fd_sync_fence(layer->buffer.fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530814 if (!fence) {
815 pr_err("%s: sync fence get failed! fd=%d\n",
816 sync_pt_data->fence_name, layer->buffer.fence);
817 ret = -EINVAL;
818 break;
819 }
820 sync_pt_data->acq_fen[acq_fen_count++] = fence;
821 }
822 sync_pt_data->acq_fen_cnt = acq_fen_count;
823 if (ret)
824 goto sync_fence_err;
825
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530826 value = sync_pt_data->threshold +
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530827 atomic_read(&sync_pt_data->commit_cnt);
828
829 release_fence = __create_fence(mfd, sync_pt_data,
830 MDSS_MDP_RELEASE_FENCE, &commit->release_fence, value);
831 if (IS_ERR_OR_NULL(release_fence)) {
832 pr_err("unable to retrieve release fence\n");
833 ret = PTR_ERR(release_fence);
834 goto release_fence_err;
835 }
836
837 retire_fence = __create_fence(mfd, sync_pt_data,
838 MDSS_MDP_RETIRE_FENCE, &commit->retire_fence, value);
839 if (IS_ERR_OR_NULL(retire_fence)) {
840 pr_err("unable to retrieve retire fence\n");
841 ret = PTR_ERR(retire_fence);
842 goto retire_fence_err;
843 }
844
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530845 mutex_unlock(&sync_pt_data->sync_mutex);
846 return ret;
847
848retire_fence_err:
849 put_unused_fd(commit->release_fence);
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530850 mdss_put_sync_fence(release_fence);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530851release_fence_err:
852 commit->retire_fence = -1;
853 commit->release_fence = -1;
854sync_fence_err:
855 for (i = 0; i < sync_pt_data->acq_fen_cnt; i++)
Sachin Bhayare2b6d0042018-01-13 19:38:21 +0530856 mdss_put_sync_fence(sync_pt_data->acq_fen[i]);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530857 sync_pt_data->acq_fen_cnt = 0;
858
859 mutex_unlock(&sync_pt_data->sync_mutex);
860
861 return ret;
862}
863
864/*
865 * __map_layer_buffer() - map input layer buffer
866 *
867 * This function maps input layer buffer. It supports only single layer
868 * buffer mapping right now. This is case for all formats including UBWC.
869 */
static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
	struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_validate_info_t *validate_info_list,
	u32 layer_count)
{
	struct mdss_mdp_data *src_data;
	struct mdp_input_layer *layer = NULL;
	struct mdp_layer_buffer *buffer;
	struct msmfb_data image;
	int i, ret;
	u32 flags;
	struct mdss_mdp_validate_info_t *vitem;

	/* locate the layer (ndx + multirect num) this pipe serves */
	for (i = 0; i < layer_count; i++) {
		vitem = &validate_info_list[i];
		layer = vitem->layer;
		if ((layer->pipe_ndx == pipe->ndx) &&
		    (vitem->multirect.num == pipe->multirect.num))
			break;
	}

	if (i == layer_count) {
		pr_err("layer count index is out of bound\n");
		src_data = ERR_PTR(-EINVAL);
		goto end;
	}

	buffer = &layer->buffer;

	/* solid fill pipes have no source buffer to map */
	if (pipe->flags & MDP_SOLID_FILL) {
		pr_err("Unexpected buffer queue to a solid fill pipe\n");
		src_data = ERR_PTR(-EINVAL);
		goto end;
	}

	/* propagate only the secure-session flags to the mapping */
	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
				MDP_SECURE_DISPLAY_OVERLAY_SESSION));

	/* only plane 0 is mapped (single-plane buffers, incl. UBWC) */
	if (buffer->planes[0].fd < 0) {
		pr_err("invalid file descriptor for layer buffer\n");
		src_data = ERR_PTR(-EINVAL);
		goto end;
	}

	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
	if (!src_data) {
		pr_err("unable to allocate source buffer\n");
		src_data = ERR_PTR(-ENOMEM);
		goto end;
	}
	memset(&image, 0, sizeof(image));

	image.memory_id = buffer->planes[0].fd;
	image.offset = buffer->planes[0].offset;
	ret = mdss_mdp_data_get_and_validate_size(src_data, &image, 1,
			flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
			buffer);
	if (ret)
		goto end_buf_free;

	src_data->num_planes = 1;
	return src_data;

end_buf_free:
	mdss_mdp_overlay_buf_free(mfd, src_data);
	src_data = ERR_PTR(ret);
end:
	return src_data;
}
939
940static inline bool __compare_layer_config(struct mdp_input_layer *validate,
941 struct mdss_mdp_pipe *pipe)
942{
943 struct mdp_input_layer *layer = &pipe->layer;
944 bool status = true;
945
946 status = !memcmp(&validate->src_rect, &layer->src_rect,
947 sizeof(validate->src_rect)) &&
948 !memcmp(&validate->dst_rect, &layer->dst_rect,
949 sizeof(validate->dst_rect)) &&
950 validate->flags == layer->flags &&
951 validate->horz_deci == layer->horz_deci &&
952 validate->vert_deci == layer->vert_deci &&
953 validate->alpha == layer->alpha &&
954 validate->color_space == layer->color_space &&
955 validate->z_order == (layer->z_order - MDSS_MDP_STAGE_0) &&
956 validate->transp_mask == layer->transp_mask &&
957 validate->bg_color == layer->bg_color &&
958 validate->blend_op == layer->blend_op &&
959 validate->buffer.width == layer->buffer.width &&
960 validate->buffer.height == layer->buffer.height &&
961 validate->buffer.format == layer->buffer.format;
962
963 if (status && (validate->flags & SCALER_ENABLED))
964 status = !memcmp(validate->scale, &pipe->scaler,
965 sizeof(pipe->scaler));
966
967 return status;
968}
969
970/*
971 * __find_layer_in_validate_q() - Search layer in validation queue
972 *
973 * This functions helps to skip validation for layers where only buffer is
974 * changing. For ex: video playback case. In order to skip validation, it
975 * compares all input layer params except buffer handle, offset, fences.
976 */
977static struct mdss_mdp_pipe *__find_layer_in_validate_q(
978 struct mdss_mdp_validate_info_t *vinfo,
979 struct mdss_overlay_private *mdp5_data)
980{
981 bool found = false;
982 struct mdss_mdp_pipe *pipe;
983 struct mdp_input_layer *layer = vinfo->layer;
984
985 mutex_lock(&mdp5_data->list_lock);
986 list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
987 if ((pipe->ndx == layer->pipe_ndx) &&
988 (pipe->multirect.num == vinfo->multirect.num)) {
989 if (__compare_layer_config(layer, pipe))
990 found = true;
991 break;
992 }
993 }
994 mutex_unlock(&mdp5_data->list_lock);
995
996 return found ? pipe : NULL;
997}
998
999static bool __find_pipe_in_list(struct list_head *head,
1000 int pipe_ndx, struct mdss_mdp_pipe **out_pipe,
1001 enum mdss_mdp_pipe_rect rect_num)
1002{
1003 struct mdss_mdp_pipe *pipe;
1004
1005 list_for_each_entry(pipe, head, list) {
1006 if ((pipe_ndx == pipe->ndx) &&
1007 (rect_num == pipe->multirect.num)) {
1008 *out_pipe = pipe;
1009 return true;
1010 }
1011 }
1012
1013 return false;
1014}
1015
1016/*
1017 * Search pipe from destroy and cleanup list to avoid validation failure.
1018 * It is caller responsibility to hold the list lock before calling this API.
1019 */
1020static struct mdss_mdp_pipe *__find_and_move_cleanup_pipe(
1021 struct mdss_overlay_private *mdp5_data, u32 pipe_ndx,
1022 enum mdss_mdp_pipe_rect rect_num)
1023{
1024 struct mdss_mdp_pipe *pipe = NULL;
1025
1026 if (__find_pipe_in_list(&mdp5_data->pipes_destroy,
1027 pipe_ndx, &pipe, rect_num)) {
1028 pr_debug("reuse destroy pipe id:%d ndx:%d rect:%d\n",
1029 pipe->num, pipe_ndx, rect_num);
1030 list_move(&pipe->list, &mdp5_data->pipes_used);
1031 } else if (__find_pipe_in_list(&mdp5_data->pipes_cleanup,
1032 pipe_ndx, &pipe, rect_num)) {
1033 pr_debug("reuse cleanup pipe id:%d ndx:%d rect:%d\n",
1034 pipe->num, pipe_ndx, rect_num);
1035 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
1036 mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
1037 pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
1038 list_move(&pipe->list, &mdp5_data->pipes_used);
1039 }
1040
1041 return pipe;
1042}
1043
1044/*
1045 * __assign_pipe_for_layer() - get a pipe for layer
1046 *
1047 * This function first searches the pipe from used list, cleanup list and
1048 * destroy list. On successful search, it returns the same pipe for current
1049 * layer. It also un-stage the pipe from current mixer for used, cleanup,
1050 * destroy pipes if they switches the mixer. On failure search, it returns
1051 * the null pipe.
1052 */
static struct mdss_mdp_pipe *__assign_pipe_for_layer(
	struct msm_fb_data_type *mfd,
	struct mdss_mdp_mixer *mixer, u32 pipe_ndx,
	enum layer_pipe_q *pipe_q_type,
	enum mdss_mdp_pipe_rect rect_num)
{
	struct mdss_mdp_pipe *pipe = NULL;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);

	/*
	 * Search order: used list first, then destroy/cleanup lists.
	 * *pipe_q_type records which queue the pipe came from so the
	 * caller can roll back correctly on validation failure.
	 */
	mutex_lock(&mdp5_data->list_lock);
	__find_pipe_in_list(&mdp5_data->pipes_used, pipe_ndx, &pipe, rect_num);
	if (IS_ERR_OR_NULL(pipe)) {
		pipe = __find_and_move_cleanup_pipe(mdp5_data,
				pipe_ndx, rect_num);
		if (IS_ERR_OR_NULL(pipe))
			*pipe_q_type = LAYER_USES_NEW_PIPE_Q;
		else
			*pipe_q_type = LAYER_USES_DESTROY_PIPE_Q;
	} else {
		*pipe_q_type = LAYER_USES_USED_PIPE_Q;
	}
	mutex_unlock(&mdp5_data->list_lock);

	/* found the pipe from used, destroy or cleanup list */
	if (!IS_ERR_OR_NULL(pipe)) {
		if (pipe->mixer_left != mixer) {
			/* a pipe may only switch mixers within the same fb */
			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
				pr_err("Can't switch mixer %d->%d pnum %d!\n",
					pipe->mixer_left->num, mixer->num,
					pipe->num);
				pipe = ERR_PTR(-EINVAL);
				goto end;
			}
			pr_debug("switching pipe%d mixer %d->%d\n",
				pipe->num,
				pipe->mixer_left ? pipe->mixer_left->num : -1,
				mixer->num);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			pipe->mixer_left = mixer;
		}
		goto end;
	}

	/* no reusable pipe found: reserve a brand new one */
	pipe = mdss_mdp_pipe_assign(mdata, mixer, pipe_ndx, rect_num);
	if (IS_ERR_OR_NULL(pipe)) {
		pr_err("error reserving pipe. pipe_ndx=0x%x rect_num=%d mfd ndx=%d\n",
			pipe_ndx, rect_num, mfd->index);
		goto end;
	}

	mutex_lock(&mdp5_data->list_lock);
	list_add(&pipe->list, &mdp5_data->pipes_used);
	mutex_unlock(&mdp5_data->list_lock);

end:
	if (!IS_ERR_OR_NULL(pipe)) {
		/* mark pipe as needing reprogramming on the next kickoff */
		pipe->dirty = false;
		pipe->params_changed++;
	}
	return pipe;
}
1115
1116/*
1117 * __is_sd_state_valid() - validate secure display state
1118 *
1119 * This function checks if the current state of secrure display is valid,
1120 * based on the new settings.
1121 * For command mode panels, the sd state would be invalid if a non secure pipe
1122 * comes and one of the below condition is met:
1123 * 1) Secure Display is enabled for current client, and there is other
1124 secure client.
1125 * 2) Secure Display is disabled for current client, and there is other
1126 secure client.
1127 * 3) Secure pipes are already staged for the current client.
1128 * For other panels, the sd state would be invalid if a non secure pipe comes
1129 * and one of the below condition is met:
1130 * 1) Secure Display is enabled for current or other client.
1131 * 2) Secure pipes are already staged for the current client.
1132 *
1133 */
1134static inline bool __is_sd_state_valid(uint32_t sd_pipes, uint32_t nonsd_pipes,
1135 int panel_type, u32 sd_enabled)
1136{
1137 if (panel_type == MIPI_CMD_PANEL) {
1138 if ((((mdss_get_sd_client_cnt() > 1) && sd_enabled) ||
1139 (mdss_get_sd_client_cnt() && !sd_enabled) ||
1140 sd_pipes)
1141 && nonsd_pipes)
1142 return false;
1143 } else {
1144 if ((sd_pipes || mdss_get_sd_client_cnt()) && nonsd_pipes)
1145 return false;
1146 }
1147 return true;
1148}
1149
1150/*
1151 * __validate_secure_display() - validate secure display
1152 *
1153 * This function travers through used pipe list and checks if any pipe
1154 * is with secure display enabled flag. It fails if client tries to stage
1155 * unsecure content with secure display session.
1156 *
1157 */
1158static int __validate_secure_display(struct mdss_overlay_private *mdp5_data)
1159{
1160 struct mdss_mdp_pipe *pipe, *tmp;
1161 uint32_t sd_pipes = 0, nonsd_pipes = 0;
1162 int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
1163 int ret = 0;
1164
1165 mutex_lock(&mdp5_data->list_lock);
1166 list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
1167 if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
1168 sd_pipes++;
1169 else
1170 nonsd_pipes++;
1171 }
1172 mutex_unlock(&mdp5_data->list_lock);
1173
1174 pr_debug("pipe count:: secure display:%d non-secure:%d\n",
1175 sd_pipes, nonsd_pipes);
1176
1177 mdp5_data->sd_transition_state = SD_TRANSITION_NONE;
1178 if (!__is_sd_state_valid(sd_pipes, nonsd_pipes, panel_type,
1179 mdp5_data->sd_enabled)) {
1180 pr_err("non-secure layer validation request during secure display session\n");
1181 pr_err(" secure client cnt:%d secure pipe cnt:%d non-secure pipe cnt:%d\n",
1182 mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes);
1183 ret = -EINVAL;
1184 } else if (!mdp5_data->sd_enabled && sd_pipes) {
1185 mdp5_data->sd_transition_state =
1186 SD_TRANSITION_NON_SECURE_TO_SECURE;
1187 } else if (mdp5_data->sd_enabled && !sd_pipes) {
1188 mdp5_data->sd_transition_state =
1189 SD_TRANSITION_SECURE_TO_NON_SECURE;
1190 }
1191 return ret;
1192}
1193
1194/*
1195 * __handle_free_list() - updates free pipe list
1196 *
1197 * This function travers through used pipe list and checks if any pipe
1198 * is not staged in current validation cycle. It moves the pipe to cleanup
1199 * list if no layer is attached for that pipe.
1200 *
1201 * This should be called after validation is successful for current cycle.
1202 * Moving pipes before can affects staged pipe for previous cycle.
1203 */
1204static void __handle_free_list(struct mdss_overlay_private *mdp5_data,
1205 struct mdss_mdp_validate_info_t *validate_info_list, u32 layer_count)
1206{
1207 int i;
1208 struct mdp_input_layer *layer;
1209 struct mdss_mdp_validate_info_t *vinfo;
1210 struct mdss_mdp_pipe *pipe, *tmp;
1211
1212 mutex_lock(&mdp5_data->list_lock);
1213 list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
1214 for (i = 0; i < layer_count; i++) {
1215 vinfo = &validate_info_list[i];
1216 layer = vinfo->layer;
1217
1218 if ((pipe->ndx == layer->pipe_ndx) &&
1219 (pipe->multirect.num == vinfo->multirect.num))
1220 break;
1221 }
1222
1223 /*
1224 * if validate cycle is not attaching any layer for this
1225 * pipe then move it to cleanup list. It does overlay_unset
1226 * task.
1227 */
1228 if (i == layer_count)
1229 list_move(&pipe->list, &mdp5_data->pipes_cleanup);
1230 }
1231 mutex_unlock(&mdp5_data->list_lock);
1232}
1233
1234static bool __multirect_validate_flip(struct mdp_input_layer **layers,
1235 size_t count)
1236{
1237 /* not supporting more than 2 layers */
1238 if (count != 2)
1239 return false;
1240
1241 /* flip related validation */
1242 if ((layers[0]->flags & MDP_LAYER_FLIP_LR) ||
1243 (layers[1]->flags & MDP_LAYER_FLIP_LR)) {
1244 pr_err("multirect and HFLIP is not allowed. input layer flags=0x%x paired layer flags=0x%x\n",
1245 layers[0]->flags, layers[1]->flags);
1246 return false;
1247 }
1248 if ((layers[0]->flags & MDP_LAYER_FLIP_UD) !=
1249 (layers[1]->flags & MDP_LAYER_FLIP_UD)) {
1250 pr_err("multirect VLFIP mismatch is not allowed\n");
1251 return false;
1252 }
1253
1254 return true;
1255}
1256
1257static bool __multirect_validate_format(struct mdp_input_layer **layers,
1258 size_t count)
1259{
1260 struct mdss_mdp_format_params *rec0_fmt, *rec1_fmt;
1261 bool is_ubwc;
1262
1263 /* not supporting more than 2 layers */
1264 if (count != 2)
1265 return false;
1266
1267 /* format related validation */
1268 rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
1269 if (!rec0_fmt) {
1270 pr_err("invalid input layer format %d\n",
1271 layers[0]->buffer.format);
1272 return false;
1273 }
1274 rec1_fmt = mdss_mdp_get_format_params(layers[1]->buffer.format);
1275 if (!rec1_fmt) {
1276 pr_err("invalid paired layer format %d\n",
1277 layers[1]->buffer.format);
1278 return false;
1279 }
1280 if (rec0_fmt->is_yuv || rec1_fmt->is_yuv) {
1281 pr_err("multirect on YUV format is not supported. input=%d paired=%d\n",
1282 rec0_fmt->is_yuv, rec1_fmt->is_yuv);
1283 return false;
1284 }
1285 if (rec0_fmt->fetch_mode != rec1_fmt->fetch_mode) {
1286 pr_err("multirect fetch_mode mismatch is not allowed. input=%d paired=%d\n",
1287 rec0_fmt->fetch_mode, rec1_fmt->fetch_mode);
1288 return false;
1289 }
1290 is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
1291 if (is_ubwc && (rec0_fmt != rec1_fmt)) {
1292 pr_err("multirect UBWC format mismatch is not allowed\n");
1293 return false;
1294 } else if (rec0_fmt->bpp != rec1_fmt->bpp) {
1295 pr_err("multirect linear format bpp mismatch is not allowed. input=%d paired=%d\n",
1296 rec0_fmt->bpp, rec1_fmt->bpp);
1297 return false;
1298 } else if (rec0_fmt->unpack_dx_format != rec1_fmt->unpack_dx_format) {
1299 pr_err("multirect linear format 10bit vs 8bit mismatch is not allowed. input=%d paired=%d\n",
1300 rec0_fmt->unpack_dx_format, rec1_fmt->unpack_dx_format);
1301 return false;
1302 }
1303
1304 if ((layers[0]->flags & MDP_LAYER_SOLID_FILL) !=
1305 (layers[1]->flags & MDP_LAYER_SOLID_FILL)) {
1306 pr_err("solid fill mismatch between multirect layers\n");
1307 return false;
1308 }
1309
1310 return true;
1311}
1312
1313static bool __multirect_validate_rects(struct mdp_input_layer **layers,
1314 size_t count)
1315{
1316 struct mdss_rect dst[MDSS_MDP_PIPE_MAX_RECTS];
1317 int i;
1318
1319 /* not supporting more than 2 layers */
1320 if (count != 2)
1321 return false;
1322
1323 for (i = 0; i < count; i++) {
1324 if ((layers[i]->src_rect.w != layers[i]->dst_rect.w) ||
1325 (layers[i]->src_rect.h != layers[i]->dst_rect.h)) {
1326 pr_err("multirect layers cannot have scaling: src: %dx%d dst: %dx%d\n",
1327 layers[i]->src_rect.w, layers[i]->src_rect.h,
1328 layers[i]->dst_rect.w, layers[i]->dst_rect.h);
1329 return false;
1330 }
1331
1332 dst[i] = (struct mdss_rect) {layers[i]->dst_rect.x,
1333 layers[i]->dst_rect.y,
1334 layers[i]->dst_rect.w,
1335 layers[i]->dst_rect.h};
1336 }
1337
1338 /* resolution related validation */
1339 if (mdss_rect_overlap_check(&dst[0], &dst[1])) {
1340 pr_err("multirect dst overlap is not allowed. input: %d,%d,%d,%d paired %d,%d,%d,%d\n",
1341 dst[0].x, dst[0].y, dst[0].w, dst[0].y,
1342 dst[1].x, dst[1].y, dst[1].w, dst[1].y);
1343 return false;
1344 }
1345
1346 return true;
1347}
1348
1349static bool __multirect_validate_properties(struct mdp_input_layer **layers,
1350 size_t count)
1351{
1352 /* not supporting more than 2 layers */
1353 if (count != 2)
1354 return false;
1355
1356 if ((layers[0]->flags & MDP_LAYER_ASYNC) ||
1357 (layers[1]->flags & MDP_LAYER_ASYNC)) {
1358 pr_err("ASYNC update is not allowed with multirect\n");
1359 return false;
1360 }
1361
1362 if (layers[0]->z_order == layers[1]->z_order) {
1363 pr_err("multirect layers cannot have same z_order=%d\n",
1364 layers[0]->z_order);
1365 return false;
1366 }
1367
1368 return true;
1369}
1370
/*
 * Table of pairwise checks a multirect layer pair must pass; each entry
 * returns true when the pair satisfies that criterion.
 */
static bool (*__multirect_validators[])(struct mdp_input_layer **layers,
		size_t count) = {
	__multirect_validate_flip,
	__multirect_validate_format,
	__multirect_validate_rects,
	__multirect_validate_properties,
};
1378
1379static inline int __multirect_layer_flags_to_mode(u32 flags)
1380{
1381 int mode;
1382
1383 if (flags & MDP_LAYER_MULTIRECT_ENABLE) {
1384 if (flags & MDP_LAYER_MULTIRECT_PARALLEL_MODE)
1385 mode = MDSS_MDP_PIPE_MULTIRECT_PARALLEL;
1386 else
1387 mode = MDSS_MDP_PIPE_MULTIRECT_SERIAL;
1388 } else {
1389 if (flags & MDP_LAYER_MULTIRECT_PARALLEL_MODE) {
1390 pr_err("Invalid parallel mode flag set without multirect enabled\n");
1391 return -EINVAL;
1392 }
1393
1394 mode = MDSS_MDP_PIPE_MULTIRECT_NONE;
1395 }
1396 return mode;
1397}
1398
1399static int __multirect_validate_mode(struct msm_fb_data_type *mfd,
1400 struct mdp_input_layer **layers,
1401 size_t count)
1402{
1403 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
1404 struct mdss_mdp_format_params *rec0_fmt;
1405 bool is_ubwc;
1406 int i, mode;
1407 struct mdp_rect *dst[MDSS_MDP_PIPE_MAX_RECTS];
1408
1409 /* not supporting more than 2 layers */
1410 if (count != 2)
1411 return false;
1412
1413 for (i = 0; i < count; i++)
1414 dst[i] = &layers[i]->dst_rect;
1415
1416 mode = __multirect_layer_flags_to_mode(layers[0]->flags);
1417
1418 /* format related validation */
1419 rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
1420 if (!rec0_fmt) {
1421 pr_err("invalid input layer format %d\n",
1422 layers[0]->buffer.format);
1423 return false;
1424 }
1425
1426 is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
1427
1428 if (mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL) {
1429 int threshold, yoffset;
1430
1431 if (dst[0]->y < dst[1]->y)
1432 yoffset = dst[1]->y - (dst[0]->y + dst[0]->h);
1433 else if (dst[1]->y < dst[0]->y)
1434 yoffset = dst[0]->y - (dst[1]->y + dst[1]->h);
1435 else
1436 yoffset = 0;
1437
1438 /*
1439 * time multiplexed is possible only if the y position of layers
1440 * is not overlapping and there is sufficient time to buffer
1441 * 2 lines/tiles. Otherwise use parallel fetch mode
1442 */
1443 threshold = 2;
1444 if (is_ubwc) {
1445 struct mdss_mdp_format_params_ubwc *uf;
1446
1447 /* in ubwc all layers would need to be same format */
1448 uf = (struct mdss_mdp_format_params_ubwc *)rec0_fmt;
1449 threshold *= uf->micro.tile_height;
1450 }
1451
1452 if (yoffset < threshold) {
1453 pr_err("Unable to operate in serial fetch mode with yoffset=%d dst[0]=%d,%d dst[1]=%d,%d\n",
1454 yoffset, dst[0]->y, dst[0]->h,
1455 dst[1]->y, dst[1]->h);
1456 return -EINVAL;
1457 }
1458 } else if (mode == MDSS_MDP_PIPE_MULTIRECT_PARALLEL) {
1459 u32 left_lm_w, rec0_mixer, rec1_mixer;
1460
1461 /*
1462 * For UBWC, 5 lines worth of buffering is needed in to meet
1463 * the performance which requires 2560w*4bpp*5lines = 50KB,
1464 * where 2560 is max width. Now let's say pixel ram is fixed to
1465 * 50KB then in UBWC parellel fetch, maximum width of each
1466 * rectangle would be 2560/2 = 1280.
1467 *
1468 * For Linear, this restriction is avoided because maximum
1469 * buffering of 2 lines is enough which yields to
1470 * 2560w*4bpp*2lines=20KB. Based on this, we can have 2 max
1471 * width rectangles in parrellel fetch mode.
1472 */
1473 if (is_ubwc &&
1474 ((dst[0]->w > (mdata->max_mixer_width / 2)) ||
1475 (dst[1]->w > (mdata->max_mixer_width / 2)))) {
1476 pr_err("in UBWC multirect parallel mode, max dst_w cannot be greater than %d. rec0_w=%d rec1_w=%d\n",
1477 mdata->max_mixer_width / 2,
1478 dst[0]->w, dst[1]->w);
1479 return -EINVAL;
1480 }
1481
1482 left_lm_w = left_lm_w_from_mfd(mfd);
1483 if (dst[0]->x < left_lm_w) {
1484 if (dst[0]->w > (left_lm_w - dst[0]->x)) {
1485 pr_err("multirect parallel mode, rec0 dst (%d,%d) cannot cross lm boundary (%d)\n",
1486 dst[0]->x, dst[0]->w, left_lm_w);
1487 return -EINVAL;
1488 }
1489 rec0_mixer = MDSS_MDP_MIXER_MUX_LEFT;
1490 } else {
1491 rec0_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
1492 }
1493
1494 if (dst[1]->x < left_lm_w) {
1495 if (dst[0]->w > (left_lm_w - dst[0]->x)) {
1496 pr_err("multirect parallel mode, rec1 dst (%d,%d) cannot cross lm boundary (%d)\n",
1497 dst[1]->x, dst[1]->w, left_lm_w);
1498 return -EINVAL;
1499 }
1500 rec1_mixer = MDSS_MDP_MIXER_MUX_LEFT;
1501 } else {
1502 rec1_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
1503 }
1504
1505 if (rec0_mixer != rec1_mixer) {
1506 pr_err("multirect parallel mode mixer mismatch. rec0_mix=%d rec1_mix=%d\n",
1507 rec0_mixer, rec1_mixer);
1508 return -EINVAL;
1509 }
1510 } else {
1511 pr_err("Invalid multirect mode %d\n", mode);
1512 }
1513
1514 pr_debug("layer->pndx:%d mode=%d\n", layers[0]->pipe_ndx, mode);
1515
1516 return 0;
1517}
1518
/*
 * __update_multirect_info() - resolve multirect pairing for a layer
 *
 * Fills the multirect info (mode, rect number, link to the paired layer)
 * for the layer at @ndx and for any later layers sharing its pipe_ndx.
 * Returns the number of rects using the pipe (1 when multirect is off),
 * or a negative error code.
 */
static int __update_multirect_info(struct msm_fb_data_type *mfd,
		struct mdss_mdp_validate_info_t *validate_info_list,
		struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_validate_info_t *vinfo[MDSS_MDP_PIPE_MAX_RECTS];
	int i, ptype, max_rects, mode;
	int cnt = 1;

	mode = __multirect_layer_flags_to_mode(layer_list[ndx].flags);
	if (IS_ERR_VALUE((unsigned long)mode))
		return mode;

	pr_debug("layer #%d pipe_ndx=%d multirect mode=%d\n",
			ndx, layer_list[ndx].pipe_ndx, mode);

	/* the layer at @ndx always becomes rect 0 of the pipe */
	vinfo[0] = &validate_info_list[ndx];
	vinfo[0]->layer = &layer_list[ndx];
	vinfo[0]->multirect.mode = mode;
	vinfo[0]->multirect.num = MDSS_MDP_PIPE_RECT0;
	vinfo[0]->multirect.next = NULL;

	/* nothing to be done if multirect is disabled */
	if (mode == MDSS_MDP_PIPE_MULTIRECT_NONE)
		return cnt;

	ptype = get_pipe_type_from_ndx(layer_list[ndx].pipe_ndx);
	if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
		pr_err("invalid pipe ndx %d\n", layer_list[ndx].pipe_ndx);
		return -EINVAL;
	}

	/* targets without per-SSPP rect info default to a single rect */
	max_rects = mdata->rects_per_sspp[ptype] ? : 1;

	/* scan the remaining layers for ones sharing this pipe */
	for (i = ndx + 1; i < layer_cnt; i++) {
		if (layer_list[ndx].pipe_ndx == layer_list[i].pipe_ndx) {
			if (cnt >= max_rects) {
				pr_err("more than %d layers of type %d with same pipe_ndx=%d indexes=%d %d\n",
					max_rects, ptype,
					layer_list[ndx].pipe_ndx, ndx, i);
				return -EINVAL;
			}

			/* all rects of one pipe must request the same mode */
			mode = __multirect_layer_flags_to_mode(
					layer_list[i].flags);
			if (IS_ERR_VALUE((unsigned long)mode))
				return mode;

			if (mode != vinfo[0]->multirect.mode) {
				pr_err("unable to set different multirect modes for pipe_ndx=%d (%d %d)\n",
					layer_list[ndx].pipe_ndx, ndx, i);
				return -EINVAL;
			}

			pr_debug("found matching pair for pipe_ndx=%d (%d %d)\n",
				layer_list[i].pipe_ndx, ndx, i);

			vinfo[cnt] = &validate_info_list[i];
			vinfo[cnt]->multirect.num = cnt;
			vinfo[cnt]->multirect.next = vinfo[0]->layer;
			vinfo[cnt]->multirect.mode = mode;
			vinfo[cnt]->layer = &layer_list[i];

			/* chain the rects: rect N's "next" is rect N+1 */
			vinfo[cnt - 1]->multirect.next = vinfo[cnt]->layer;
			cnt++;
		}
	}

	/* multirect requested but no paired rect found is an error */
	if (cnt == 1) {
		pr_err("multirect mode enabled but unable to find extra rects for pipe_ndx=%x\n",
			layer_list[ndx].pipe_ndx);
		return -EINVAL;
	}

	return cnt;
}
1595
1596static int __validate_multirect(struct msm_fb_data_type *mfd,
1597 struct mdss_mdp_validate_info_t *validate_info_list,
1598 struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
1599{
1600 struct mdp_input_layer *layers[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
1601 int i, cnt, rc;
1602
1603 cnt = __update_multirect_info(mfd, validate_info_list,
1604 layer_list, ndx, layer_cnt);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301605 if (IS_ERR_VALUE((unsigned long)cnt))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301606 return cnt;
1607
1608 if (cnt <= 1) {
1609 /* nothing to validate in single rect mode */
1610 return 0;
1611 } else if (cnt > 2) {
1612 pr_err("unsupported multirect configuration, multirect cnt=%d\n",
1613 cnt);
1614 return -EINVAL;
1615 }
1616
1617 layers[0] = validate_info_list[ndx].layer;
1618 layers[1] = validate_info_list[ndx].multirect.next;
1619
1620 for (i = 0; i < ARRAY_SIZE(__multirect_validators); i++) {
1621 if (!__multirect_validators[i](layers, cnt))
1622 return -EINVAL;
1623 }
1624
1625 rc = __multirect_validate_mode(mfd, layers, cnt);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301626 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301627 return rc;
1628
1629 return 0;
1630}
1631
1632/*
1633 * __validate_layers() - validate input layers
1634 * @mfd: Framebuffer data structure for display
1635 * @commit: Commit version-1 structure for display
1636 *
1637 * This function validates all input layers present in layer_list. In case
1638 * of failure, it updates the "error_code" for failed layer. It is possible
1639 * to find failed layer from layer_list based on "error_code".
1640 */
static int __validate_layers(struct msm_fb_data_type *mfd,
	struct file *file, struct mdp_layer_commit_v1 *commit)
{
	int ret, i = 0;
	/* per-rect bitmasks of pipe ndx values seen/acquired this cycle */
	int rec_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
	int rec_release_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
	int rec_destroy_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
	u32 left_lm_layers = 0, right_lm_layers = 0;
	u32 left_cnt = 0, right_cnt = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	u32 mixer_mux, dst_x;
	int layer_count = commit->input_layer_cnt;

	struct mdss_mdp_pipe *pipe = NULL, *tmp, *left_blend_pipe;
	struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = {0};
	struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = {0};
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);

	struct mdss_mdp_mixer *mixer = NULL;
	struct mdp_input_layer *layer, *prev_layer, *layer_list;
	struct mdss_mdp_validate_info_t *validate_info_list = NULL;
	bool is_single_layer = false, force_validate;
	enum layer_pipe_q pipe_q_type;
	enum layer_zorder_used zorder_used[MDSS_MDP_MAX_STAGE] = {0};
	enum mdss_mdp_pipe_rect rect_num;

	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
	if (ret)
		return ret;

	/* no layers: still run free-list and secure-display handling */
	if (!layer_count)
		goto validate_skip;

	layer_list = commit->input_layers;

	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
				GFP_KERNEL);
	if (!validate_info_list) {
		ret = -ENOMEM;
		goto end;
	}

	/*
	 * Pass 1: count layers per mixer, resolve multirect pairing and
	 * reject duplicate pipe/rect combinations.
	 */
	for (i = 0; i < layer_count; i++) {
		if (layer_list[i].dst_rect.x >= left_lm_w)
			right_lm_layers++;
		else
			left_lm_layers++;

		if (right_lm_layers >= MAX_PIPES_PER_LM ||
			left_lm_layers >= MAX_PIPES_PER_LM) {
			pr_err("too many pipes stagged mixer left: %d mixer right:%d\n",
				left_lm_layers, right_lm_layers);
			ret = -EINVAL;
			goto end;
		}

		/* multirect info is filled for the whole pair at once */
		if (!validate_info_list[i].layer) {
			ret = __validate_multirect(mfd, validate_info_list,
						layer_list, i, layer_count);
			if (ret) {
				pr_err("error validating multirect config. ret=%d i=%d\n",
					ret, i);
				goto end;
			}
		}

		rect_num = validate_info_list[i].multirect.num;
		WARN_ON(rect_num >= MDSS_MDP_PIPE_MAX_RECTS);

		if (rec_ndx[rect_num] & layer_list[i].pipe_ndx) {
			pr_err("duplicate layer found pipe_ndx=%d rect=%d (0x%x)\n",
				layer_list[i].pipe_ndx, rect_num,
				rec_ndx[rect_num]);
			ret = -EINVAL;
			goto end;
		}

		rec_ndx[rect_num] |= layer_list[i].pipe_ndx;
	}

	/*
	 * Force all layers to go through full validation after
	 * dynamic resolution switch, immaterial of the configs in
	 * the layer.
	 */
	mutex_lock(&mfd->switch_lock);
	force_validate = (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED);
	mutex_unlock(&mfd->switch_lock);

	/* Pass 2: validate each layer and attach a pipe to it */
	for (i = 0; i < layer_count; i++) {
		enum layer_zorder_used z = LAYER_ZORDER_NONE;

		layer = &layer_list[i];
		dst_x = layer->dst_rect.x;
		left_blend_pipe = NULL;

		prev_layer = (i > 0) ? &layer_list[i - 1] : NULL;
		/*
		 * check if current layer is at same z_order as
		 * previous one, and fail if any or both are async layers,
		 * as async layers should have unique z_order.
		 *
		 * If it has same z_order and qualifies as a right blend,
		 * pass a pointer to the pipe representing previous overlay or
		 * in other terms left blend layer.
		 *
		 * Following logic of selecting left_blend has an inherent
		 * assumption that layer list is sorted on dst_x within a
		 * same z_order. Otherwise it will fail based on z_order checks.
		 */
		if (prev_layer && (prev_layer->z_order == layer->z_order)) {
			struct mdp_rect *left = &prev_layer->dst_rect;
			struct mdp_rect *right = &layer->dst_rect;

			if ((layer->flags & MDP_LAYER_ASYNC)
				|| (prev_layer->flags & MDP_LAYER_ASYNC)) {
				ret = -EINVAL;
				layer->error_code = ret;
				pr_err("async layer should have unique z_order\n");
				goto validate_exit;
			}

			/*
			 * check if layer is right blend by checking it's
			 * directly to the right.
			 */
			if (((left->x + left->w) == right->x) &&
			    (left->y == right->y) && (left->h == right->h))
				left_blend_pipe = pipe;

			/*
			 * if the layer is right at the left lm boundary and
			 * src split is not required then right blend is not
			 * required as it will lie only on the left mixer
			 */
			if (!__layer_needs_src_split(prev_layer) &&
			    ((left->x + left->w) == left_lm_w))
				left_blend_pipe = NULL;
		}

		/* z records which mixer halves this z_order slot occupies */
		if (!is_split_lm(mfd) || __layer_needs_src_split(layer))
			z = LAYER_ZORDER_BOTH;
		else if (dst_x >= left_lm_w)
			z = LAYER_ZORDER_RIGHT;
		else if ((dst_x + layer->dst_rect.w) <= left_lm_w)
			z = LAYER_ZORDER_LEFT;
		else
			z = LAYER_ZORDER_BOTH;

		if (!left_blend_pipe && (layer->z_order >= MDSS_MDP_MAX_STAGE ||
				(z & zorder_used[layer->z_order]))) {
			pr_err("invalid z_order=%d or already in use %x\n",
					layer->z_order, z);
			ret = -EINVAL;
			layer->error_code = ret;
			goto validate_exit;
		} else {
			zorder_used[layer->z_order] |= z;
		}

		if ((layer->dst_rect.x < left_lm_w) ||
				__layer_needs_src_split(layer)) {
			is_single_layer = (left_lm_layers == 1);
			mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
		} else {
			is_single_layer = (right_lm_layers == 1);
			mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
		}

		/**
		 * search pipe in current used list to find if parameters
		 * are same. validation can be skipped if only buffer handle
		 * is changed.
		 */
		pipe = (force_validate) ? NULL :
				__find_layer_in_validate_q(
					&validate_info_list[i], mdp5_data);
		if (pipe) {
			if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
				right_plist[right_cnt++] = pipe;
			else
				left_plist[left_cnt++] = pipe;

			if (layer->flags & MDP_LAYER_PP) {
				memcpy(&pipe->pp_cfg, layer->pp_info,
					sizeof(struct mdp_overlay_pp_params));
				ret = mdss_mdp_pp_sspp_config(pipe);
				if (ret)
					pr_err("pp setup failed %d\n", ret);
				else
					pipe->params_changed++;
			}
			pipe->dirty = false;
			continue;
		}

		mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
		if (!mixer) {
			pr_err("unable to get %s mixer\n",
				(mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
				"right" : "left");
			ret = -EINVAL;
			layer->error_code = ret;
			goto validate_exit;
		}

		/* translate userspace 0-based z_order to hardware stage */
		layer->z_order += MDSS_MDP_STAGE_0;
		ret = __validate_single_layer(mfd, &validate_info_list[i],
				mixer_mux);
		if (ret) {
			pr_err("layer:%d validation failed ret=%d\n", i, ret);
			layer->error_code = ret;
			goto validate_exit;
		}

		rect_num = validate_info_list[i].multirect.num;

		pipe = __assign_pipe_for_layer(mfd, mixer, layer->pipe_ndx,
			&pipe_q_type, rect_num);
		if (IS_ERR_OR_NULL(pipe)) {
			pr_err("error assigning pipe id=0x%x rc:%ld\n",
				layer->pipe_ndx, PTR_ERR(pipe));
			ret = PTR_ERR(pipe);
			layer->error_code = ret;
			goto validate_exit;
		}

		/* remember pipes acquired this cycle for rollback on error */
		if (pipe_q_type == LAYER_USES_NEW_PIPE_Q)
			rec_release_ndx[rect_num] |= pipe->ndx;
		if (pipe_q_type == LAYER_USES_DESTROY_PIPE_Q)
			rec_destroy_ndx[rect_num] |= pipe->ndx;

		ret = mdss_mdp_pipe_map(pipe);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_err("Unable to map used pipe%d ndx=%x\n",
				pipe->num, pipe->ndx);
			layer->error_code = ret;
			goto validate_exit;
		}

		if (pipe_q_type == LAYER_USES_USED_PIPE_Q) {
			/*
			 * reconfig is allowed on new/destroy pipes. Only used
			 * pipe needs this extra validation.
			 */
			ret = __validate_layer_reconfig(layer, pipe);
			if (ret) {
				pr_err("layer reconfig validation failed=%d\n",
					ret);
				mdss_mdp_pipe_unmap(pipe);
				layer->error_code = ret;
				goto validate_exit;
			}
		}

		ret = __configure_pipe_params(mfd, &validate_info_list[i], pipe,
			left_blend_pipe, is_single_layer, mixer_mux);
		if (ret) {
			pr_err("configure pipe param failed: pipe index= %d\n",
				pipe->ndx);
			mdss_mdp_pipe_unmap(pipe);
			layer->error_code = ret;
			goto validate_exit;
		}

		mdss_mdp_pipe_unmap(pipe);

		/* keep the original copy of dst_x */
		pipe->layer.dst_rect.x = layer->dst_rect.x = dst_x;

		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
			right_plist[right_cnt++] = pipe;
		else
			left_plist[left_cnt++] = pipe;

		pr_debug("id:0x%x flags:0x%x dst_x:%d\n",
			layer->pipe_ndx, layer->flags, layer->dst_rect.x);
		/* restore userspace 0-based z_order */
		layer->z_order -= MDSS_MDP_STAGE_0;
	}

	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
			right_plist, right_cnt);
	if (ret) {
		pr_err("bw validation check failed: %d\n", ret);
		goto validate_exit;
	}

validate_skip:
	__handle_free_list(mdp5_data, validate_info_list, layer_count);

	ret = __validate_secure_display(mdp5_data);

validate_exit:
	pr_debug("err=%d total_layer:%d left:%d right:%d rec0_rel_ndx=0x%x rec1_rel_ndx=0x%x rec0_destroy_ndx=0x%x rec1_destroy_ndx=0x%x processed=%d\n",
		ret, layer_count, left_lm_layers, right_lm_layers,
		rec_release_ndx[0], rec_release_ndx[1],
		rec_destroy_ndx[0], rec_destroy_ndx[1], i);
	MDSS_XLOG(rec_ndx[0], rec_ndx[1], layer_count,
			left_lm_layers, right_lm_layers,
			rec_release_ndx[0], rec_release_ndx[1],
			rec_destroy_ndx[0], rec_destroy_ndx[1], ret);
	/*
	 * On failure: destroy pipes newly allocated this cycle and push
	 * reused destroy/cleanup pipes back to the destroy list. On
	 * success: tag each used pipe with the owning file so it can be
	 * released when the file is closed.
	 */
	mutex_lock(&mdp5_data->list_lock);
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
		if (IS_ERR_VALUE((unsigned long)ret)) {
			if (((pipe->ndx & rec_release_ndx[0]) &&
				(pipe->multirect.num == 0)) ||
				((pipe->ndx & rec_release_ndx[1]) &&
				(pipe->multirect.num == 1))) {
				mdss_mdp_smp_unreserve(pipe);
				pipe->params_changed = 0;
				pipe->dirty = true;
				if (!list_empty(&pipe->list))
					list_del_init(&pipe->list);
				mdss_mdp_pipe_destroy(pipe);
			} else if (((pipe->ndx & rec_destroy_ndx[0]) &&
				(pipe->multirect.num == 0)) ||
				((pipe->ndx & rec_destroy_ndx[1]) &&
				(pipe->multirect.num == 1))) {
				/*
				 * cleanup/destroy list pipes should move back
				 * to destroy list. Next/current kickoff cycle
				 * will release the pipe because validate also
				 * acquires ov_lock.
				 */
				list_move(&pipe->list,
					&mdp5_data->pipes_destroy);
			}
		} else {
			pipe->file = file;
			pr_debug("file pointer attached with pipe is %pK\n",
				file);
		}
	}
	mutex_unlock(&mdp5_data->list_lock);
end:
	kfree(validate_info_list);
	mutex_unlock(&mdp5_data->ov_lock);

	pr_debug("fb%d validated layers =%d\n", mfd->index, i);

	return ret;
}
1983
1984/*
1985 * __parse_frc_info() - parse frc info from userspace
1986 * @mdp5_data: mdss data per FB device
1987 * @input_frc: frc info from user space
1988 *
1989 * This function fills the FRC info of current device which will be used
1990 * during following kickoff.
1991 */
1992static void __parse_frc_info(struct mdss_overlay_private *mdp5_data,
1993 struct mdp_frc_info *input_frc)
1994{
1995 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
1996 struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;
1997
1998 if (input_frc->flags & MDP_VIDEO_FRC_ENABLE) {
1999 struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;
2000
2001 if (!frc_fsm->enable) {
2002 /* init frc_fsm when first entry */
2003 mdss_mdp_frc_fsm_init_state(frc_fsm);
2004 /* keep vsync on when FRC is enabled */
2005 ctl->ops.add_vsync_handler(ctl,
2006 &ctl->frc_vsync_handler);
2007 }
2008
2009 frc_info->cur_frc.frame_cnt = input_frc->frame_cnt;
2010 frc_info->cur_frc.timestamp = input_frc->timestamp;
2011 } else if (frc_fsm->enable) {
2012 /* remove vsync handler when FRC is disabled */
2013 ctl->ops.remove_vsync_handler(ctl, &ctl->frc_vsync_handler);
2014 }
2015
2016 frc_fsm->enable = input_frc->flags & MDP_VIDEO_FRC_ENABLE;
2017
2018 pr_debug("frc_enable=%d\n", frc_fsm->enable);
2019}
2020
2021/*
2022 * mdss_mdp_layer_pre_commit() - pre commit validation for input layers
2023 * @mfd: Framebuffer data structure for display
2024 * @commit: Commit version-1 structure for display
2025 *
2026 * This function checks if layers present in commit request are already
2027 * validated or not. If there is mismatch in validate and commit layers
 * then it validates all input layers again. On successful validation, it
2029 * maps the input layer buffer and creates release/retire fences.
2030 *
2031 * This function is called from client context and can return the error.
2032 */
int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
	struct file *file, struct mdp_layer_commit_v1 *commit)
{
	int ret, i;
	int layer_count = commit->input_layer_cnt;
	bool validate_failed = false;

	struct mdss_mdp_pipe *pipe, *tmp;
	struct mdp_input_layer *layer_list;
	struct mdss_overlay_private *mdp5_data;
	/* mapped buffers, one slot per non-solid-fill pipe in pipes_used */
	struct mdss_mdp_data *src_data[MDSS_MDP_MAX_SSPP];
	struct mdss_mdp_validate_info_t *validate_info_list;

	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl)
		return -EINVAL;

	layer_list = commit->input_layers;

	/* handle null commit */
	if (!layer_count) {
		__handle_free_list(mdp5_data, NULL, layer_count);
		/* Check for secure state transition. */
		return __validate_secure_display(mdp5_data);
	}

	/* kcalloc zeroes the array, so every .layer starts out NULL */
	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
				     GFP_KERNEL);
	if (!validate_info_list)
		return -ENOMEM;

	/*
	 * Fill multirect pairing info for each layer. Entries already
	 * populated by an earlier iteration (paired rects) are skipped.
	 */
	for (i = 0; i < layer_count; i++) {
		if (!validate_info_list[i].layer) {
			ret = __update_multirect_info(mfd, validate_info_list,
				layer_list, i, layer_count);
			if (IS_ERR_VALUE((unsigned long)ret)) {
				pr_err("error updating multirect config. ret=%d i=%d\n",
					ret, i);
				goto end;
			}
		}
	}

	/*
	 * If every layer already has a pipe assigned from a prior
	 * validate ioctl, the validation step can be skipped entirely.
	 */
	for (i = 0; i < layer_count; i++) {
		pipe = __find_layer_in_validate_q(&validate_info_list[i],
				mdp5_data);
		if (!pipe) {
			validate_failed = true;
			break;
		}
	}

	if (validate_failed) {
		ret = __validate_layers(mfd, file, commit);
		if (ret) {
			pr_err("__validate_layers failed. rc=%d\n", ret);
			goto end;
		}
	} else {
		/*
		 * move unassigned pipes to cleanup list since commit
		 * supports validate+commit operation.
		 */
		__handle_free_list(mdp5_data, validate_info_list, layer_count);
	}

	i = 0;

	mutex_lock(&mdp5_data->list_lock);
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
		/*
		 * Solid-fill pipes have no source buffer and must not
		 * consume a src_data slot (i is not incremented here),
		 * keeping the unwind loop below buffer-only.
		 */
		if (pipe->flags & MDP_SOLID_FILL) {
			src_data[i] = NULL;
			continue;
		}
		src_data[i] = __map_layer_buffer(mfd, pipe, validate_info_list,
			layer_count);
		if (IS_ERR_OR_NULL(src_data[i++])) {
			/* step back to the failing slot (post-increment above) */
			i--;
			mutex_unlock(&mdp5_data->list_lock);
			ret = PTR_ERR(src_data[i]);
			goto map_err;
		}
	}
	mutex_unlock(&mdp5_data->list_lock);

	ret = mdss_mdp_overlay_start(mfd);
	if (ret) {
		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
		goto map_err;
	}

	if (commit->frc_info)
		__parse_frc_info(mdp5_data, commit->frc_info);

	/* create release/retire fences for the mapped layers */
	ret = __handle_buffer_fences(mfd, commit, layer_list);

map_err:
	/* on any failure, release every buffer mapped so far */
	if (ret) {
		mutex_lock(&mdp5_data->list_lock);
		for (i--; i >= 0; i--)
			if (src_data[i])
				mdss_mdp_overlay_buf_free(mfd, src_data[i]);
		mutex_unlock(&mdp5_data->list_lock);
	}
end:
	kfree(validate_info_list);

	return ret;
}
2143
2144/*
2145 * mdss_mdp_layer_atomic_validate() - validate input layers
2146 * @mfd: Framebuffer data structure for display
2147 * @commit: Commit version-1 structure for display
2148 *
 * This function validates only the input layers received from the client.
 * It does not perform any validation for the mdp_output_layer defined for
 * writeback display.
2152 */
2153int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
2154 struct file *file, struct mdp_layer_commit_v1 *commit)
2155{
2156 struct mdss_overlay_private *mdp5_data;
2157
2158 if (!mfd || !commit) {
2159 pr_err("invalid input params\n");
2160 return -EINVAL;
2161 }
2162
2163 mdp5_data = mfd_to_mdp5_data(mfd);
2164
2165 if (!mdp5_data || !mdp5_data->ctl) {
2166 pr_err("invalid input params\n");
2167 return -ENODEV;
2168 }
2169
2170 if (mdss_fb_is_power_off(mfd)) {
2171 pr_err("display interface is in off state fb:%d\n",
2172 mfd->index);
2173 return -EPERM;
2174 }
2175
2176 return __validate_layers(mfd, file, commit);
2177}
2178
/*
 * mdss_mdp_layer_pre_commit_wfd() - pre-commit for writeback (WFD) display
 * @mfd: framebuffer data structure for the display
 * @file: file handle of the calling client
 * @commit: commit version-1 structure carrying input and output layers
 *
 * Registers the output (writeback) buffer and its acquire fence, then runs
 * the regular input-layer pre-commit. On success the output fence is handed
 * over to mdp_sync_pt_data; on failure it is released here along with the
 * queued writeback data.
 */
int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
	struct file *file, struct mdp_layer_commit_v1 *commit)
{
	int rc, count;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_mdp_wfd *wfd = NULL;
	struct mdp_output_layer *output_layer = NULL;
	struct mdss_mdp_wfd_data *data = NULL;
	struct mdss_fence *fence = NULL;
	struct msm_sync_pt_data *sync_pt_data = NULL;

	if (!mfd || !commit)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
		pr_err("invalid wfd state\n");
		return -ENODEV;
	}

	if (commit->output_layer) {
		wfd = mdp5_data->wfd;
		output_layer = commit->output_layer;

		/* bound the user-supplied plane count before queueing */
		if (output_layer->buffer.plane_count > MAX_PLANES) {
			pr_err("Output buffer plane_count exceeds MAX_PLANES limit:%d\n",
				output_layer->buffer.plane_count);
			return -EINVAL;
		}

		data = mdss_mdp_wfd_add_data(wfd, output_layer);
		if (IS_ERR_OR_NULL(data))
			return PTR_ERR(data);

		/* a negative fd means no acquire fence was provided */
		if (output_layer->buffer.fence >= 0) {
			fence = mdss_get_fd_sync_fence(
					output_layer->buffer.fence);
			if (!fence) {
				pr_err("fail to get output buffer fence\n");
				rc = -EINVAL;
				goto fence_get_err;
			}
		}
	} else {
		/*
		 * No output layer in this commit: only legal when a prior
		 * validate already set up the writeback control path.
		 */
		wfd = mdp5_data->wfd;
		if (!wfd->ctl || !wfd->ctl->wb) {
			pr_err("wfd commit with null out layer and no validate\n");
			return -EINVAL;
		}
	}

	rc = mdss_mdp_layer_pre_commit(mfd, file, commit);
	if (rc) {
		pr_err("fail to import input layer buffers. rc=%d\n", rc);
		goto input_layer_err;
	}

	/*
	 * Hand the output fence over to the sync-point data; from here on
	 * the fence is owned by sync_pt_data, not this function.
	 */
	if (fence) {
		sync_pt_data = &mfd->mdp_sync_pt_data;
		mutex_lock(&sync_pt_data->sync_mutex);
		count = sync_pt_data->acq_fen_cnt;

		if (count >= MDP_MAX_FENCE_FD) {
			pr_err("Reached maximum possible value for fence count\n");
			mutex_unlock(&sync_pt_data->sync_mutex);
			rc = -EINVAL;
			goto input_layer_err;
		}

		sync_pt_data->acq_fen[count] = fence;
		sync_pt_data->acq_fen_cnt++;
		mutex_unlock(&sync_pt_data->sync_mutex);
	}
	return rc;

input_layer_err:
	/* fence ownership was not transferred; drop our reference */
	if (fence)
		mdss_put_sync_fence(fence);
fence_get_err:
	if (data)
		mdss_mdp_wfd_remove_data(wfd, data);
	return rc;
}
2263
2264int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
2265 struct file *file, struct mdp_layer_commit_v1 *commit)
2266{
2267 int rc = 0;
2268 struct mdss_overlay_private *mdp5_data;
2269 struct mdss_mdp_wfd *wfd;
2270 struct mdp_output_layer *output_layer;
2271
2272 if (!mfd || !commit)
2273 return -EINVAL;
2274
2275 mdp5_data = mfd_to_mdp5_data(mfd);
2276
2277 if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
2278 pr_err("invalid wfd state\n");
2279 return -ENODEV;
2280 }
2281
2282 if (!commit->output_layer) {
2283 pr_err("no output layer defined\n");
2284 return -EINVAL;
2285 }
2286
2287 wfd = mdp5_data->wfd;
2288 output_layer = commit->output_layer;
2289
2290 rc = mdss_mdp_wfd_validate(wfd, output_layer);
2291 if (rc) {
2292 pr_err("fail to validate the output layer = %d\n", rc);
2293 goto validate_failed;
2294 }
2295
2296 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2297 rc = mdss_mdp_wfd_setup(wfd, output_layer);
2298 if (rc) {
2299 pr_err("fail to prepare wfd = %d\n", rc);
2300 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2301 goto validate_failed;
2302 }
2303 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2304
2305 rc = mdss_mdp_layer_atomic_validate(mfd, file, commit);
2306 if (rc) {
2307 pr_err("fail to validate the input layers = %d\n", rc);
2308 goto validate_failed;
2309 }
2310
2311validate_failed:
2312 return rc;
2313}
2314
2315int mdss_mdp_async_position_update(struct msm_fb_data_type *mfd,
2316 struct mdp_position_update *update_pos)
2317{
2318 int i, rc = 0;
2319 struct mdss_mdp_pipe *pipe = NULL;
2320 struct mdp_async_layer *layer;
2321 struct mdss_rect dst, src;
2322 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
2323 u32 flush_bits = 0, inputndx = 0;
2324
2325 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2326
2327 for (i = 0; i < update_pos->input_layer_cnt; i++) {
2328 layer = &update_pos->input_layers[i];
2329 mutex_lock(&mdp5_data->list_lock);
2330 __find_pipe_in_list(&mdp5_data->pipes_used, layer->pipe_ndx,
2331 &pipe, MDSS_MDP_PIPE_RECT0);
2332 mutex_unlock(&mdp5_data->list_lock);
2333 if (!pipe) {
2334 pr_err("invalid pipe ndx=0x%x for async update\n",
2335 layer->pipe_ndx);
2336 rc = -ENODEV;
2337 layer->error_code = rc;
2338 goto done;
2339 }
2340
2341 rc = __async_update_position_check(mfd, pipe, &layer->src,
2342 &layer->dst);
2343 if (rc) {
2344 layer->error_code = rc;
2345 goto done;
2346 }
2347
2348 src = (struct mdss_rect) {layer->src.x, layer->src.y,
2349 pipe->src.w, pipe->src.h};
2350 dst = (struct mdss_rect) {layer->dst.x, layer->dst.y,
2351 pipe->src.w, pipe->src.h};
2352
2353 pr_debug("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
2354 src.x, src.y, src.w, src.h,
2355 dst.x, dst.y, dst.w, dst.h);
2356
2357 mdss_mdp_pipe_position_update(pipe, &src, &dst);
2358
2359 flush_bits |= mdss_mdp_get_pipe_flush_bits(pipe);
2360 inputndx |= layer->pipe_ndx;
2361 }
2362 mdss_mdp_async_ctl_flush(mfd, flush_bits);
2363
2364done:
2365 MDSS_XLOG(inputndx, update_pos->input_layer_cnt, flush_bits, rc);
2366 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2367 return rc;
2368}
2369