blob: 6a292b24d80adaf7c23c6d5533a440439c349d48 [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/errno.h>
17#include <linux/kernel.h>
18#include <linux/major.h>
19#include <linux/pm_runtime.h>
20#include <linux/uaccess.h>
21#include <linux/delay.h>
22#include <linux/msm_mdp.h>
23#include <linux/memblock.h>
24#include <linux/sync.h>
25#include <linux/sw_sync.h>
26#include <linux/file.h>
27
28#include <soc/qcom/event_timer.h>
29#include "mdss.h"
30#include "mdss_debug.h"
31#include "mdss_fb.h"
32#include "mdss_mdp.h"
33#include "mdss_mdp_wfd.h"
34
35#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
36 (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
37
38#define SCALER_ENABLED \
39 (MDP_LAYER_ENABLE_PIXEL_EXT | MDP_LAYER_ENABLE_QSEED3_SCALE)
40
/* Fence types produced by __create_fence() for a commit. */
enum {
	MDSS_MDP_RELEASE_FENCE = 0,
	MDSS_MDP_RETIRE_FENCE,
};

/* Origin of the pipe chosen for a layer by __assign_pipe_for_layer(). */
enum layer_pipe_q {
	LAYER_USES_NEW_PIPE_Q = 0,
	LAYER_USES_USED_PIPE_Q,
	LAYER_USES_DESTROY_PIPE_Q,
};

/* Bitmask: which layer-mixer halves a given z-order stage occupies. */
enum layer_zorder_used {
	LAYER_ZORDER_NONE = 0,
	LAYER_ZORDER_LEFT = 1,
	LAYER_ZORDER_RIGHT = 2,
	LAYER_ZORDER_BOTH = 3,
};

/* Pairs an input layer with its multirect parameters during validation. */
struct mdss_mdp_validate_info_t {
	struct mdp_input_layer *layer;
	struct mdss_mdp_pipe_multirect_params multirect;
};
63
64/*
65 * __layer_needs_src_split() - check needs source split configuration
66 * @layer: input layer
67 *
68 * return true if the layer should be used as source split
69 */
70static bool __layer_needs_src_split(struct mdp_input_layer *layer)
71{
72 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
73
74 return (layer->flags & MDP_LAYER_ASYNC) ||
75 mdss_has_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
76}
77
/*
 * __async_update_position_check() - validate an async position update
 * @mfd: framebuffer device the pipe is staged on
 * @pipe: pipe whose src/dst origin is being moved
 * @src: requested new source rect origin
 * @dst: requested new destination rect origin
 *
 * Only the x/y origins may change in an async update; the staged
 * width/height are re-checked at the new origins against the source
 * image and the panel resolution.
 *
 * Return: 0 on success, -EINVAL for an invalid configuration.
 */
static int __async_update_position_check(struct msm_fb_data_type *mfd,
		struct mdss_mdp_pipe *pipe, struct mdp_point *src,
		struct mdp_point *dst)
{
	struct fb_var_screeninfo *var = &mfd->fbi->var;
	u32 xres = var->xres;
	u32 yres = var->yres;

	/* pipe must have been staged with MDP_LAYER_ASYNC for this path */
	if (!pipe->async_update
		|| CHECK_LAYER_BOUNDS(src->x, pipe->src.w, pipe->img_width)
		|| CHECK_LAYER_BOUNDS(src->y, pipe->src.h, pipe->img_height)
		|| CHECK_LAYER_BOUNDS(dst->x, pipe->dst.w, xres)
		|| CHECK_LAYER_BOUNDS(dst->y, pipe->dst.h, yres)) {
		pr_err("invalid configs: async_update=%d, src:{%d,%d}, dst:{%d,%d}\n",
			pipe->async_update, src->x, src->y, dst->x, dst->y);
		pr_err("pipe:- src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
			pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
			pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
		return -EINVAL;
	}
	return 0;
}
100
/*
 * __cursor_layer_check() - validate a layer assigned to a cursor pipe
 * @mfd: framebuffer device (unused here, kept for symmetry with peers)
 * @layer: input layer targeting a cursor pipe
 *
 * Cursor pipes have no scaler, so src and dst rects must match exactly;
 * both dimensions are bounded by the target's max cursor size, the layer
 * must sit at the dedicated HW cursor z-order stage, and the target must
 * actually have cursor pipes.
 *
 * Return: 0 on success, -EINVAL for an invalid cursor configuration.
 */
static int __cursor_layer_check(struct msm_fb_data_type *mfd,
		struct mdp_input_layer *layer)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if ((layer->z_order != HW_CURSOR_STAGE(mdata))
			|| layer->src_rect.w > mdata->max_cursor_size
			|| layer->src_rect.h > mdata->max_cursor_size
			|| layer->src_rect.w != layer->dst_rect.w
			|| layer->src_rect.h != layer->dst_rect.h
			|| !mdata->ncursor_pipes) {
		pr_err("Incorrect cursor configs for pipe:%d, cursor_pipes:%d, z_order:%d\n",
				layer->pipe_ndx, mdata->ncursor_pipes,
				layer->z_order);
		pr_err("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
				layer->src_rect.x, layer->src_rect.y,
				layer->src_rect.w, layer->src_rect.h,
				layer->dst_rect.x, layer->dst_rect.y,
				layer->dst_rect.w, layer->dst_rect.h);
		return -EINVAL;
	}

	return 0;
}
125
/*
 * __layer_xres_check() - validate the layer's horizontal destination
 * @mfd: framebuffer device
 * @layer: input layer to check
 *
 * Computes the usable horizontal resolution depending on which mixer the
 * destination rect falls on and whether the target supports source split,
 * then bounds-checks dst x/w against it.
 *
 * NOTE: when the layer lands on the right mixer and the target has no
 * src_split, this function rebases layer->dst_rect.x to be relative to
 * the right mixer (a deliberate side effect on @layer).
 *
 * Return: 0 on success, -EPERM if the required mixer is missing,
 * -EINVAL if dst exceeds the resolution.
 */
static int __layer_xres_check(struct msm_fb_data_type *mfd,
	struct mdp_input_layer *layer)
{
	u32 xres = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);

	if (layer->dst_rect.x >= left_lm_w) {
		/* layer starts on the right mixer */
		if (mdata->has_src_split)
			xres = left_lm_w;
		else
			layer->dst_rect.x -= left_lm_w;

		if (ctl->mixer_right) {
			xres += ctl->mixer_right->width;
		} else {
			pr_err("ov cannot be placed on right mixer\n");
			return -EPERM;
		}
	} else {
		if (ctl->mixer_left) {
			xres = ctl->mixer_left->width;
		} else {
			pr_err("ov cannot be placed on left mixer\n");
			return -EPERM;
		}

		/* with src_split a left layer may spill into the right LM */
		if (mdata->has_src_split && ctl->mixer_right)
			xres += ctl->mixer_right->width;
	}

	if (CHECK_LAYER_BOUNDS(layer->dst_rect.x, layer->dst_rect.w, xres)) {
		pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
			layer->dst_rect.x, layer->dst_rect.w, xres);
		return -EINVAL;
	}

	return 0;
}
166
/*
 * __layer_param_check() - validate one layer's geometry/format parameters
 * @mfd: framebuffer device
 * @layer: input layer to validate
 * @fmt: resolved format parameters for layer->buffer.format
 * @rect_num: multirect rectangle index the layer maps to
 *
 * Checks, in order: secure-session policy for writeback, z-order range,
 * pipe existence, source rect vs (possibly deinterlace-adjusted) buffer
 * bounds, minimum dst size, decimation constraints, vertical dst bounds,
 * scaling ratio limits, BWC restrictions, deinterlace alignment and
 * YUV even-alignment requirements.
 *
 * Return: 0 on success; -EPERM for security violations, -EOVERFLOW for
 * vertical overflow, -E2BIG for scaling-ratio violations, -EINVAL for
 * everything else.
 */
static int __layer_param_check(struct msm_fb_data_type *mfd,
	struct mdp_input_layer *layer, struct mdss_mdp_format_params *fmt,
	enum mdss_mdp_pipe_rect rect_num)
{
	u32 yres;
	u32 min_src_size, min_dst_size = 1;
	int content_secure;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	u32 src_w, src_h, dst_w, dst_h, width, height;

	if (!ctl) {
		pr_err("ctl is null\n");
		return -EINVAL;
	}

	if (ctl->mixer_left) {
		yres = ctl->mixer_left->height;
	} else {
		pr_debug("Using fb var screen infor for height\n");
		yres = mfd->fbi->var.yres;
	}

	/* secure layers are not allowed on a non-secure writeback session */
	content_secure = (layer->flags & MDP_LAYER_SECURE_SESSION);
	if (!ctl->is_secure && content_secure &&
			(mfd->panel.type == WRITEBACK_PANEL)) {
		pr_debug("return due to security concerns\n");
		return -EPERM;
	}
	/* YUV sources need at least 2px for chroma subsampling */
	min_src_size = fmt->is_yuv ? 2 : 1;

	if (layer->z_order >= (mdata->max_target_zorder + MDSS_MDP_STAGE_0)) {
		pr_err("zorder %d out of range\n", layer->z_order);
		return -EINVAL;
	}

	if (!mdss_mdp_pipe_search(mdata, layer->pipe_ndx, rect_num)) {
		pr_err("layer pipe is invalid: 0x%x rect:%d\n",
				layer->pipe_ndx, rect_num);
		return -EINVAL;
	}

	/*
	 * Deinterlacing fetches two fields as one line, effectively
	 * doubling the width and halving the height of the source.
	 */
	width = layer->buffer.width;
	height = layer->buffer.height;
	if (layer->flags & MDP_LAYER_DEINTERLACE) {
		width *= 2;
		height /= 2;
	}

	if (layer->buffer.width > MAX_IMG_WIDTH ||
	    layer->buffer.height > MAX_IMG_HEIGHT ||
	    layer->src_rect.w < min_src_size ||
	    layer->src_rect.h < min_src_size ||
	    CHECK_LAYER_BOUNDS(layer->src_rect.x, layer->src_rect.w, width) ||
	    CHECK_LAYER_BOUNDS(layer->src_rect.y, layer->src_rect.h, height)) {
		pr_err("invalid source image img flag=%d wh=%dx%d rect=%d,%d,%d,%d\n",
			layer->flags, width, height,
			layer->src_rect.x, layer->src_rect.y,
			layer->src_rect.w, layer->src_rect.h);
		return -EINVAL;
	}

	if (layer->dst_rect.w < min_dst_size ||
		layer->dst_rect.h < min_dst_size) {
		pr_err("invalid destination resolution (%dx%d)",
				layer->dst_rect.w, layer->dst_rect.h);
		return -EINVAL;
	}

	if (layer->horz_deci || layer->vert_deci) {
		if (!mdata->has_decimation) {
			pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
			return -EINVAL;
		} else if ((layer->horz_deci > MAX_DECIMATION) ||
				(layer->vert_deci > MAX_DECIMATION)) {
			pr_err("Invalid decimation factors horz=%d vert=%d\n",
					layer->horz_deci, layer->vert_deci);
			return -EINVAL;
		} else if (layer->flags & MDP_LAYER_BWC) {
			pr_err("Decimation can't be enabled with BWC\n");
			return -EINVAL;
		} else if (fmt->fetch_mode != MDSS_MDP_FETCH_LINEAR) {
			pr_err("Decimation can't be enabled with MacroTile format\n");
			return -EINVAL;
		}
	}

	if (CHECK_LAYER_BOUNDS(layer->dst_rect.y, layer->dst_rect.h, yres)) {
		pr_err("invalid vertical destination: y=%d, h=%d, yres=%d\n",
			layer->dst_rect.y, layer->dst_rect.h, yres);
		return -EOVERFLOW;
	}

	dst_w = layer->dst_rect.w;
	dst_h = layer->dst_rect.h;

	/* scaling ratios are evaluated on the post-decimation source size */
	src_w = layer->src_rect.w >> layer->horz_deci;
	src_h = layer->src_rect.h >> layer->vert_deci;

	if (src_w > mdata->max_mixer_width) {
		pr_err("invalid source width=%d HDec=%d\n",
			layer->src_rect.w, layer->horz_deci);
		return -EINVAL;
	}

	if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
		pr_err("too much upscaling Width %d->%d\n",
			layer->src_rect.w, layer->dst_rect.w);
		return -E2BIG;
	}

	if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
		pr_err("too much upscaling. Height %d->%d\n",
			layer->src_rect.h, layer->dst_rect.h);
		return -E2BIG;
	}

	if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
		pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
			src_w, layer->dst_rect.w, layer->horz_deci);
		return -E2BIG;
	}

	if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
		pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
			src_h, layer->dst_rect.h, layer->vert_deci);
		return -E2BIG;
	}

	if (layer->flags & MDP_LAYER_BWC) {
		/* BWC requires the source rect to cover the full buffer */
		if ((layer->buffer.width != layer->src_rect.w) ||
			(layer->buffer.height != layer->src_rect.h)) {
			pr_err("BWC: mismatch of src img=%dx%d rect=%dx%d\n",
				layer->buffer.width, layer->buffer.height,
				layer->src_rect.w, layer->src_rect.h);
			return -EINVAL;
		}

		if (layer->horz_deci || layer->vert_deci) {
			pr_err("Can't enable BWC decode && decimate\n");
			return -EINVAL;
		}
	}

	/* interlaced content needs field-pair alignment unless scaler is on */
	if ((layer->flags & MDP_LAYER_DEINTERLACE) &&
		!(layer->flags & SCALER_ENABLED)) {
		if (layer->flags & MDP_SOURCE_ROTATED_90) {
			if ((layer->src_rect.w % 4) != 0) {
				pr_err("interlaced rect not h/4\n");
				return -EINVAL;
			}
		} else if ((layer->src_rect.h % 4) != 0) {
			pr_err("interlaced rect not h/4\n");
			return -EINVAL;
		}
	}

	/* chroma subsampling forbids odd YUV coordinates/dimensions */
	if (fmt->is_yuv) {
		if ((layer->src_rect.x & 0x1) || (layer->src_rect.y & 0x1) ||
		    (layer->src_rect.w & 0x1) || (layer->src_rect.h & 0x1)) {
			pr_err("invalid odd src resolution or coordinates\n");
			return -EINVAL;
		}
	}

	return 0;
}
334
335/* compare all reconfiguration parameter validation in this API */
336static int __validate_layer_reconfig(struct mdp_input_layer *layer,
337 struct mdss_mdp_pipe *pipe)
338{
339 int status = 0;
340 struct mdss_mdp_format_params *src_fmt;
341
342 /*
343 * csc registers are not double buffered. It is not permitted
344 * to change them on staged pipe with YUV layer.
345 */
346 if (pipe->csc_coeff_set != layer->color_space) {
347 src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
348 if (pipe->src_fmt->is_yuv && src_fmt && src_fmt->is_yuv) {
349 status = -EPERM;
350 pr_err("csc change is not permitted on used pipe\n");
351 }
352 }
353
354 return status;
355}
356
357static int __validate_single_layer(struct msm_fb_data_type *mfd,
358 struct mdss_mdp_validate_info_t *layer_info, u32 mixer_mux)
359{
360 u32 bwc_enabled;
361 int ret;
362 bool is_vig_needed = false;
363 struct mdss_mdp_format_params *fmt;
364 struct mdss_mdp_mixer *mixer = NULL;
365 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
366 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
367 struct mdp_input_layer *layer = layer_info->layer;
368 int ptype = get_pipe_type_from_ndx(layer->pipe_ndx);
369
370 if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
371 pr_err("Invalid pipe ndx=%d\n", layer->pipe_ndx);
372 return -EINVAL;
373 }
374
375 if ((layer->dst_rect.w > mdata->max_mixer_width) ||
376 (layer->dst_rect.h > MAX_DST_H)) {
377 pr_err("exceeded max mixer supported resolution %dx%d\n",
378 layer->dst_rect.w, layer->dst_rect.h);
379 ret = -EINVAL;
380 goto exit_fail;
381 }
382
383 pr_debug("ctl=%u mux=%d z_order=%d flags=0x%x dst_x:%d\n",
384 mdp5_data->ctl->num, mixer_mux, layer->z_order,
385 layer->flags, layer->dst_rect.x);
386
387 fmt = mdss_mdp_get_format_params(layer->buffer.format);
388 if (!fmt) {
389 pr_err("invalid layer format %d\n", layer->buffer.format);
390 ret = -EINVAL;
391 goto exit_fail;
392 }
393
394 bwc_enabled = layer->flags & MDP_LAYER_BWC;
395
396 if (bwc_enabled) {
397 if (!mdp5_data->mdata->has_bwc) {
398 pr_err("layer uses bwc format but MDP does not support it\n");
399 ret = -EINVAL;
400 goto exit_fail;
401 }
402
403 layer->buffer.format =
404 mdss_mdp_get_rotator_dst_format(
405 layer->buffer.format, false, bwc_enabled);
406 fmt = mdss_mdp_get_format_params(layer->buffer.format);
407 if (!fmt) {
408 pr_err("invalid layer format %d\n",
409 layer->buffer.format);
410 ret = -EINVAL;
411 goto exit_fail;
412 }
413 }
414
415 if (ptype == MDSS_MDP_PIPE_TYPE_CURSOR) {
416 ret = __cursor_layer_check(mfd, layer);
417 if (ret)
418 goto exit_fail;
419 }
420
421 ret = __layer_xres_check(mfd, layer);
422 if (ret)
423 goto exit_fail;
424
425 ret = __layer_param_check(mfd, layer, fmt, layer_info->multirect.num);
426 if (ret)
427 goto exit_fail;
428
429 mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
430 if (!mixer) {
431 pr_err("unable to get %s mixer\n",
432 (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
433 "right" : "left");
434 ret = -EPERM;
435 goto exit_fail;
436 }
437
438 if (fmt->is_yuv || (mdata->has_non_scalar_rgb &&
439 ((layer->src_rect.w != layer->dst_rect.w) ||
440 (layer->src_rect.h != layer->dst_rect.h))))
441 is_vig_needed = true;
442
443 if (is_vig_needed && ptype != MDSS_MDP_PIPE_TYPE_VIG) {
444 pr_err("pipe is non-scalar ndx=%x\n", layer->pipe_ndx);
445 ret = -EINVAL;
446 goto exit_fail;
447 }
448
449 if (((ptype == MDSS_MDP_PIPE_TYPE_DMA) ||
450 (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) &&
451 (layer->dst_rect.h != layer->src_rect.h ||
452 layer->dst_rect.w != layer->src_rect.w)) {
453 pr_err("no scaling supported on dma/cursor pipe, pipe num:%d\n",
454 layer->pipe_ndx);
455 return -EINVAL;
456 }
457
458exit_fail:
459 return ret;
460}
461
/*
 * __configure_pipe_params() - program a pipe's SW state from a layer
 * @mfd: framebuffer device
 * @vinfo: layer plus multirect parameters
 * @pipe: pipe being (re)configured
 * @left_blend_pipe: pipe this one right-blends with, or NULL
 * @is_single_layer: true if this is the only layer on the mixer
 * @mixer_mux: mixer (left/right) the layer is targeted at
 *
 * Copies geometry, flags, blending, scaler and pp configuration from the
 * layer into the pipe, resolves src-split/right-blend staging, applies
 * panel-orientation flips and deinterlace source adjustments, then runs
 * scaling setup, performance tuning and SMP reservation.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __configure_pipe_params(struct msm_fb_data_type *mfd,
	struct mdss_mdp_validate_info_t *vinfo, struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer,
	u32 mixer_mux)
{
	int ret = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	u32 flags;
	bool is_right_blend = false;

	struct mdss_mdp_mixer *mixer = NULL;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdp_input_layer *layer = vinfo->layer;

	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
	pipe->src_fmt = mdss_mdp_get_format_params(layer->buffer.format);
	if (!pipe->src_fmt || !mixer) {
		pr_err("invalid layer format:%d or mixer:%pK\n",
				layer->buffer.format, pipe->mixer_left);
		ret = -EINVAL;
		goto end;
	}

	pipe->comp_ratio = layer->buffer.comp_ratio;

	/* fold panel orientation into the layer's flip flags */
	if (mfd->panel_orientation)
		layer->flags ^= mfd->panel_orientation;

	pipe->mixer_left = mixer;
	pipe->mfd = mfd;
	pipe->play_cnt = 0;
	pipe->flags = 0;

	/* translate MDP_LAYER_* flags to internal MDP_* pipe flags */
	if (layer->flags & MDP_LAYER_FLIP_LR)
		pipe->flags = MDP_FLIP_LR;
	if (layer->flags & MDP_LAYER_FLIP_UD)
		pipe->flags |= MDP_FLIP_UD;
	if (layer->flags & MDP_LAYER_SECURE_SESSION)
		pipe->flags |= MDP_SECURE_OVERLAY_SESSION;
	if (layer->flags & MDP_LAYER_SECURE_DISPLAY_SESSION)
		pipe->flags |= MDP_SECURE_DISPLAY_OVERLAY_SESSION;
	if (layer->flags & MDP_LAYER_SOLID_FILL)
		pipe->flags |= MDP_SOLID_FILL;
	if (layer->flags & MDP_LAYER_DEINTERLACE)
		pipe->flags |= MDP_DEINTERLACE;
	if (layer->flags & MDP_LAYER_BWC)
		pipe->flags |= MDP_BWC_EN;
	if (layer->flags & MDP_LAYER_PP)
		pipe->flags |= MDP_OVERLAY_PP_CFG_EN;

	pipe->is_fg = layer->flags & MDP_LAYER_FORGROUND;
	pipe->img_width = layer->buffer.width & 0x3fff;
	pipe->img_height = layer->buffer.height & 0x3fff;
	pipe->src.x = layer->src_rect.x;
	pipe->src.y = layer->src_rect.y;
	pipe->src.w = layer->src_rect.w;
	pipe->src.h = layer->src_rect.h;
	pipe->dst.x = layer->dst_rect.x;
	pipe->dst.y = layer->dst_rect.y;
	pipe->dst.w = layer->dst_rect.w;
	pipe->dst.h = layer->dst_rect.h;
	pipe->horz_deci = layer->horz_deci;
	pipe->vert_deci = layer->vert_deci;
	pipe->bg_color = layer->bg_color;
	pipe->alpha = layer->alpha;
	pipe->transp = layer->transp_mask;
	pipe->blend_op = layer->blend_op;
	pipe->is_handed_off = false;
	pipe->async_update = (layer->flags & MDP_LAYER_ASYNC) ? true : false;
	pipe->csc_coeff_set = layer->color_space;

	/* shift destination by any border offsets programmed on the ctl */
	if (mixer->ctl) {
		pipe->dst.x += mixer->ctl->border_x_off;
		pipe->dst.y += mixer->ctl->border_y_off;
		pr_debug("border{%d,%d}\n", mixer->ctl->border_x_off,
				mixer->ctl->border_y_off);
	}
	pr_debug("src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n",
		pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
		pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);

	if (layer->flags & SCALER_ENABLED)
		memcpy(&pipe->scaler, layer->scale,
				sizeof(struct mdp_scale_data_v2));

	pipe->scaler.enable = (layer->flags & SCALER_ENABLED);

	flags = pipe->flags;
	if (is_single_layer)
		flags |= PERF_CALC_PIPE_SINGLE_LAYER;

	/*
	 * async update is allowed only in video mode panels with single LM
	 * or dual LM with src_split enabled.
	 */
	if (pipe->async_update && ((is_split_lm(mfd) && !mdata->has_src_split)
			|| (!mdp5_data->ctl->is_video_mode))) {
		pr_err("async update allowed only in video mode panel with src_split\n");
		ret = -EINVAL;
		goto end;
	}

	/*
	 * unstage the pipe if it's current z_order does not match with new
	 * z_order because client may only call the validate.
	 */
	if (pipe->mixer_stage != layer->z_order)
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);

	/*
	 * check if overlay span across two mixers and if source split is
	 * available. If yes, enable src_split_req flag so that during mixer
	 * staging, same pipe will be stagged on both layer mixers.
	 */
	if (mdata->has_src_split) {
		is_right_blend = pipe->is_right_blend;
		if (left_blend_pipe) {
			/* right-blend pipe must have higher fetch priority */
			if (pipe->priority <= left_blend_pipe->priority) {
				pr_err("priority limitation. left:%d right%d\n",
					left_blend_pipe->priority,
					pipe->priority);
				ret = -EPERM;
				goto end;
			} else {
				pr_debug("pipe%d is a right_pipe\n", pipe->num);
				is_right_blend = true;
			}
		} else if (pipe->is_right_blend) {
			/*
			 * pipe used to be right blend. So need to update mixer
			 * configuration to remove it as a right blend.
			 */
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
			is_right_blend = false;
		}

		if (is_split_lm(mfd) && __layer_needs_src_split(layer)) {
			pipe->src_split_req = true;
		} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
		    ((layer->dst_rect.x + layer->dst_rect.w) > mixer->width)) {
			/* left-mixer layer spilling into the right mixer */
			if (layer->dst_rect.x >= mixer->width) {
				pr_err("%pS: err dst_x can't lie in right half",
					__builtin_return_address(0));
				pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
					layer->flags, layer->dst_rect.x,
					layer->dst_rect.w, mixer->width);
				ret = -EINVAL;
				goto end;
			} else {
				pipe->src_split_req = true;
			}
		} else {
			/* src split no longer needed: drop the right mixer */
			if (pipe->src_split_req) {
				mdss_mdp_mixer_pipe_unstage(pipe,
					pipe->mixer_right);
				pipe->mixer_right = NULL;
			}
			pipe->src_split_req = false;
		}
		pipe->is_right_blend = is_right_blend;
	}

	pipe->multirect.mode = vinfo->multirect.mode;
	pipe->mixer_stage = layer->z_order;

	/* mirror destination for flipped panel orientation */
	if (mfd->panel_orientation & MDP_FLIP_LR)
		pipe->dst.x = pipe->mixer_left->width - pipe->dst.x -
			pipe->dst.w;
	if (mfd->panel_orientation & MDP_FLIP_UD)
		pipe->dst.y = pipe->mixer_left->height - pipe->dst.y -
			pipe->dst.h;

	/* keep a copy of the input layer for later validate-q comparison */
	memcpy(&pipe->layer, layer, sizeof(struct mdp_input_layer));

	mdss_mdp_overlay_set_chroma_sample(pipe);

	if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
		pipe->blend_op = pipe->src_fmt->alpha_enable ?
			BLEND_OP_PREMULTIPLIED : BLEND_OP_OPAQUE;

	if (pipe->src_fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
			!pipe->scaler.enable) {
		pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;

		if (pipe->dst.x >= left_lm_w)
			pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
		pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
	} else {
		pipe->overfetch_disable = 0;
	}

	/*
	 * When scaling is enabled src crop and image
	 * width and height is modified by user
	 */
	if ((pipe->flags & MDP_DEINTERLACE) && !pipe->scaler.enable) {
		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
			pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
			pipe->src.x &= ~1;
			pipe->src.w /= 2;
			pipe->img_width /= 2;
		} else {
			pipe->src.h /= 2;
			pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
			pipe->src.y &= ~1;
		}
	}

	ret = mdss_mdp_overlay_setup_scaling(pipe);
	if (ret) {
		pr_err("scaling setup failed %d\n", ret);
		goto end;
	}

	if (layer->flags & MDP_LAYER_PP) {
		memcpy(&pipe->pp_cfg, layer->pp_info,
				sizeof(struct mdp_overlay_pp_params));
		ret = mdss_mdp_pp_sspp_config(pipe);
		if (ret) {
			pr_err("pp setup failed %d\n", ret);
			goto end;
		}
	}

	/* cursor pipes skip perf tuning and SMP reservation */
	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
		goto end;

	ret = mdp_pipe_tune_perf(pipe, flags);
	if (ret) {
		pr_err("unable to satisfy performance. ret=%d\n", ret);
		goto end;
	}

	ret = mdss_mdp_smp_reserve(pipe);
	if (ret) {
		pr_err("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
				pipe->num, ret);
		goto end;
	}
end:
	return ret;
}
706
/*
 * __create_fence() - create a release or retire fence for a commit
 * @mfd: framebuffer device
 * @sync_pt_data: per-fb sync point bookkeeping
 * @fence_type: MDSS_MDP_RELEASE_FENCE or MDSS_MDP_RETIRE_FENCE
 * @fence_fd: out; unused fd reserved for the fence (not yet installed)
 * @value: timeline value at which the fence signals
 *
 * For retire fences on command-mode panels the fence is placed on the
 * vsync timeline instead of the commit timeline, since retirement is
 * tied to vsync there. The caller is responsible for installing the
 * returned fence onto *fence_fd (or releasing both on error).
 *
 * Return: fence pointer on success, ERR_PTR/NULL on failure.
 */
static struct sync_fence *__create_fence(struct msm_fb_data_type *mfd,
	struct msm_sync_pt_data *sync_pt_data, u32 fence_type,
	int *fence_fd, int value)
{
	struct mdss_overlay_private *mdp5_data;
	struct mdss_mdp_ctl *ctl;
	struct sync_fence *sync_fence = NULL;
	char fence_name[32];

	mdp5_data = mfd_to_mdp5_data(mfd);

	ctl = mdp5_data->ctl;
	if (!ctl->ops.add_vsync_handler) {
		pr_err("fb%d vsync pending first update\n", mfd->index);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (!mdss_mdp_ctl_is_power_on(ctl)) {
		pr_err("fb%d ctl power on failed\n", mfd->index);
		return ERR_PTR(-EPERM);
	}

	if (fence_type == MDSS_MDP_RETIRE_FENCE)
		snprintf(fence_name, sizeof(fence_name), "fb%d_retire",
			mfd->index);
	else
		snprintf(fence_name, sizeof(fence_name), "fb%d_release",
			mfd->index);

	if ((fence_type == MDSS_MDP_RETIRE_FENCE) &&
		(mfd->panel.type == MIPI_CMD_PANEL)) {
		/* cmd panels retire on the vsync timeline */
		if (mdp5_data->vsync_timeline) {
			value = mdp5_data->vsync_timeline->value + 1 +
				mdp5_data->retire_cnt++;
			sync_fence = mdss_fb_sync_get_fence(
				mdp5_data->vsync_timeline, fence_name, value);
		} else {
			return ERR_PTR(-EPERM);
		}
	} else {
		sync_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
				fence_name, value);
	}

	if (IS_ERR_OR_NULL(sync_fence)) {
		pr_err("%s: unable to retrieve release fence\n", fence_name);
		goto end;
	}

	/* get fence fd */
	*fence_fd = get_unused_fd_flags(0);
	if (*fence_fd < 0) {
		pr_err("%s: get_unused_fd_flags failed error:0x%x\n",
			fence_name, *fence_fd);
		sync_fence_put(sync_fence);
		sync_fence = NULL;
		goto end;
	}

end:
	return sync_fence;
}
769
770/*
771 * __handle_buffer_fences() - copy sync fences and return release/retire
772 * fence to caller.
773 *
774 * This function copies all input sync fences to acquire fence array and
775 * returns release/retire fences to caller. It acts like buff_sync ioctl.
776 */
static int __handle_buffer_fences(struct msm_fb_data_type *mfd,
	struct mdp_layer_commit_v1 *commit, struct mdp_input_layer *layer_list)
{
	struct sync_fence *fence, *release_fence, *retire_fence;
	struct msm_sync_pt_data *sync_pt_data = NULL;
	struct mdp_input_layer *layer;
	int value;

	u32 acq_fen_count, i, ret = 0;
	u32 layer_count = commit->input_layer_cnt;

	sync_pt_data = &mfd->mdp_sync_pt_data;
	if (!sync_pt_data) {
		pr_err("sync point data are NULL\n");
		return -EINVAL;
	}

	/* drain fences still pending from the previous commit */
	i = mdss_fb_wait_for_fence(sync_pt_data);
	if (i > 0)
		pr_warn("%s: waited on %d active fences\n",
			sync_pt_data->fence_name, i);

	mutex_lock(&sync_pt_data->sync_mutex);
	/* collect each layer's acquire fence; fd < 0 means "no fence" */
	for (i = 0, acq_fen_count = 0; i < layer_count; i++) {
		layer = &layer_list[i];

		if (layer->buffer.fence < 0)
			continue;

		fence = sync_fence_fdget(layer->buffer.fence);
		if (!fence) {
			pr_err("%s: sync fence get failed! fd=%d\n",
				sync_pt_data->fence_name, layer->buffer.fence);
			ret = -EINVAL;
			break;
		}
		sync_pt_data->acq_fen[acq_fen_count++] = fence;
	}
	sync_pt_data->acq_fen_cnt = acq_fen_count;
	if (ret)
		goto sync_fence_err;

	/* release point: after all queued commits have been consumed */
	value = sync_pt_data->timeline_value + sync_pt_data->threshold +
			atomic_read(&sync_pt_data->commit_cnt);

	release_fence = __create_fence(mfd, sync_pt_data,
		MDSS_MDP_RELEASE_FENCE, &commit->release_fence, value);
	if (IS_ERR_OR_NULL(release_fence)) {
		pr_err("unable to retrieve release fence\n");
		ret = PTR_ERR(release_fence);
		goto release_fence_err;
	}

	retire_fence = __create_fence(mfd, sync_pt_data,
		MDSS_MDP_RETIRE_FENCE, &commit->retire_fence, value);
	if (IS_ERR_OR_NULL(retire_fence)) {
		pr_err("unable to retrieve retire fence\n");
		ret = PTR_ERR(retire_fence);
		goto retire_fence_err;
	}

	/* bind fences to the fds reserved by __create_fence() */
	sync_fence_install(release_fence, commit->release_fence);
	sync_fence_install(retire_fence, commit->retire_fence);

	mutex_unlock(&sync_pt_data->sync_mutex);
	return ret;

retire_fence_err:
	/* release fence fd was reserved but never installed; undo both */
	put_unused_fd(commit->release_fence);
	sync_fence_put(release_fence);
release_fence_err:
	commit->retire_fence = -1;
	commit->release_fence = -1;
sync_fence_err:
	for (i = 0; i < sync_pt_data->acq_fen_cnt; i++)
		sync_fence_put(sync_pt_data->acq_fen[i]);
	sync_pt_data->acq_fen_cnt = 0;

	mutex_unlock(&sync_pt_data->sync_mutex);

	return ret;
}
859
860/*
861 * __map_layer_buffer() - map input layer buffer
862 *
863 * This function maps input layer buffer. It supports only single layer
864 * buffer mapping right now. This is case for all formats including UBWC.
865 */
static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
	struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_validate_info_t *validate_info_list,
	u32 layer_count)
{
	struct mdss_mdp_data *src_data;
	struct mdp_input_layer *layer = NULL;
	struct mdp_layer_buffer *buffer;
	struct msmfb_data image;
	int i, ret;
	u32 flags;
	struct mdss_mdp_validate_info_t *vitem;

	/* find the layer that was validated for this pipe + rect */
	for (i = 0; i < layer_count; i++) {
		vitem = &validate_info_list[i];
		layer = vitem->layer;
		if ((layer->pipe_ndx == pipe->ndx) &&
		    (vitem->multirect.num == pipe->multirect.num))
			break;
	}

	if (i == layer_count) {
		pr_err("layer count index is out of bound\n");
		src_data = ERR_PTR(-EINVAL);
		goto end;
	}

	buffer = &layer->buffer;

	/* solid-fill pipes have no source buffer by definition */
	if (pipe->flags & MDP_SOLID_FILL) {
		pr_err("Unexpected buffer queue to a solid fill pipe\n");
		src_data = ERR_PTR(-EINVAL);
		goto end;
	}

	/* propagate only the secure-session flags to the mapping */
	flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
				MDP_SECURE_DISPLAY_OVERLAY_SESSION));

	if (buffer->planes[0].fd < 0) {
		pr_err("invalid file descriptor for layer buffer\n");
		src_data = ERR_PTR(-EINVAL);
		goto end;
	}

	src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
	if (!src_data) {
		pr_err("unable to allocate source buffer\n");
		src_data = ERR_PTR(-ENOMEM);
		goto end;
	}
	memset(&image, 0, sizeof(image));

	/* only single-plane buffer mapping is supported here */
	image.memory_id = buffer->planes[0].fd;
	image.offset = buffer->planes[0].offset;
	ret = mdss_mdp_data_get_and_validate_size(src_data, &image, 1,
			flags, &mfd->pdev->dev, false, DMA_TO_DEVICE,
			buffer);
	if (ret)
		goto end_buf_free;

	src_data->num_planes = 1;
	return src_data;

end_buf_free:
	mdss_mdp_overlay_buf_free(mfd, src_data);
	src_data = ERR_PTR(ret);
end:
	return src_data;
}
935
936static inline bool __compare_layer_config(struct mdp_input_layer *validate,
937 struct mdss_mdp_pipe *pipe)
938{
939 struct mdp_input_layer *layer = &pipe->layer;
940 bool status = true;
941
942 status = !memcmp(&validate->src_rect, &layer->src_rect,
943 sizeof(validate->src_rect)) &&
944 !memcmp(&validate->dst_rect, &layer->dst_rect,
945 sizeof(validate->dst_rect)) &&
946 validate->flags == layer->flags &&
947 validate->horz_deci == layer->horz_deci &&
948 validate->vert_deci == layer->vert_deci &&
949 validate->alpha == layer->alpha &&
950 validate->color_space == layer->color_space &&
951 validate->z_order == (layer->z_order - MDSS_MDP_STAGE_0) &&
952 validate->transp_mask == layer->transp_mask &&
953 validate->bg_color == layer->bg_color &&
954 validate->blend_op == layer->blend_op &&
955 validate->buffer.width == layer->buffer.width &&
956 validate->buffer.height == layer->buffer.height &&
957 validate->buffer.format == layer->buffer.format;
958
959 if (status && (validate->flags & SCALER_ENABLED))
960 status = !memcmp(validate->scale, &pipe->scaler,
961 sizeof(pipe->scaler));
962
963 return status;
964}
965
966/*
967 * __find_layer_in_validate_q() - Search layer in validation queue
968 *
969 * This functions helps to skip validation for layers where only buffer is
970 * changing. For ex: video playback case. In order to skip validation, it
971 * compares all input layer params except buffer handle, offset, fences.
972 */
973static struct mdss_mdp_pipe *__find_layer_in_validate_q(
974 struct mdss_mdp_validate_info_t *vinfo,
975 struct mdss_overlay_private *mdp5_data)
976{
977 bool found = false;
978 struct mdss_mdp_pipe *pipe;
979 struct mdp_input_layer *layer = vinfo->layer;
980
981 mutex_lock(&mdp5_data->list_lock);
982 list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
983 if ((pipe->ndx == layer->pipe_ndx) &&
984 (pipe->multirect.num == vinfo->multirect.num)) {
985 if (__compare_layer_config(layer, pipe))
986 found = true;
987 break;
988 }
989 }
990 mutex_unlock(&mdp5_data->list_lock);
991
992 return found ? pipe : NULL;
993}
994
995static bool __find_pipe_in_list(struct list_head *head,
996 int pipe_ndx, struct mdss_mdp_pipe **out_pipe,
997 enum mdss_mdp_pipe_rect rect_num)
998{
999 struct mdss_mdp_pipe *pipe;
1000
1001 list_for_each_entry(pipe, head, list) {
1002 if ((pipe_ndx == pipe->ndx) &&
1003 (rect_num == pipe->multirect.num)) {
1004 *out_pipe = pipe;
1005 return true;
1006 }
1007 }
1008
1009 return false;
1010}
1011
1012/*
1013 * Search pipe from destroy and cleanup list to avoid validation failure.
1014 * It is caller responsibility to hold the list lock before calling this API.
1015 */
static struct mdss_mdp_pipe *__find_and_move_cleanup_pipe(
	struct mdss_overlay_private *mdp5_data, u32 pipe_ndx,
	enum mdss_mdp_pipe_rect rect_num)
{
	struct mdss_mdp_pipe *pipe = NULL;

	if (__find_pipe_in_list(&mdp5_data->pipes_destroy,
				pipe_ndx, &pipe, rect_num)) {
		/* destroy-list pipe: just move it back to the used list */
		pr_debug("reuse destroy pipe id:%d ndx:%d rect:%d\n",
				pipe->num, pipe_ndx, rect_num);
		list_move(&pipe->list, &mdp5_data->pipes_used);
	} else if (__find_pipe_in_list(&mdp5_data->pipes_cleanup,
				pipe_ndx, &pipe, rect_num)) {
		/*
		 * cleanup-list pipe: unstage it from both mixers and reset
		 * its stage before returning it to the used list.
		 */
		pr_debug("reuse cleanup pipe id:%d ndx:%d rect:%d\n",
				pipe->num, pipe_ndx, rect_num);
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
		mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
		pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
		list_move(&pipe->list, &mdp5_data->pipes_used);
	}

	return pipe;
}
1039
/*
 * __assign_pipe_for_layer() - get a pipe for layer
 * @mfd: framebuffer device the layer is committed against
 * @mixer: mixer the layer will be staged on
 * @pipe_ndx: pipe index requested by the layer
 * @pipe_q_type: out-param reporting which queue the pipe came from
 *               (new / used / destroy-or-cleanup)
 * @rect_num: multirect rectangle number for the layer
 *
 * This function first searches the pipe from used list, cleanup list and
 * destroy list. On successful search, it returns the same pipe for current
 * layer. It also un-stage the pipe from current mixer for used, cleanup,
 * destroy pipes if they switches the mixer. On failure search, it returns
 * the null pipe (or an ERR_PTR on a disallowed mixer switch).
 */
static struct mdss_mdp_pipe *__assign_pipe_for_layer(
	struct msm_fb_data_type *mfd,
	struct mdss_mdp_mixer *mixer, u32 pipe_ndx,
	enum layer_pipe_q *pipe_q_type,
	enum mdss_mdp_pipe_rect rect_num)
{
	struct mdss_mdp_pipe *pipe = NULL;
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);

	/* search order: used list first, then destroy/cleanup lists */
	mutex_lock(&mdp5_data->list_lock);
	__find_pipe_in_list(&mdp5_data->pipes_used, pipe_ndx, &pipe, rect_num);
	if (IS_ERR_OR_NULL(pipe)) {
		pipe = __find_and_move_cleanup_pipe(mdp5_data,
				pipe_ndx, rect_num);
		if (IS_ERR_OR_NULL(pipe))
			*pipe_q_type = LAYER_USES_NEW_PIPE_Q;
		else
			*pipe_q_type = LAYER_USES_DESTROY_PIPE_Q;
	} else {
		*pipe_q_type = LAYER_USES_USED_PIPE_Q;
	}
	mutex_unlock(&mdp5_data->list_lock);

	/* found the pipe from used, destroy or cleanup list */
	if (!IS_ERR_OR_NULL(pipe)) {
		if (pipe->mixer_left != mixer) {
			/*
			 * a pipe may only migrate between mixers that are
			 * driven by the same framebuffer device
			 */
			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
				pr_err("Can't switch mixer %d->%d pnum %d!\n",
					pipe->mixer_left->num, mixer->num,
					pipe->num);
				pipe = ERR_PTR(-EINVAL);
				goto end;
			}
			pr_debug("switching pipe%d mixer %d->%d\n",
				pipe->num,
				pipe->mixer_left ? pipe->mixer_left->num : -1,
				mixer->num);
			mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
			pipe->mixer_left = mixer;
		}
		goto end;
	}

	/* nothing reusable: reserve a brand new pipe */
	pipe = mdss_mdp_pipe_assign(mdata, mixer, pipe_ndx, rect_num);
	if (IS_ERR_OR_NULL(pipe)) {
		pr_err("error reserving pipe. pipe_ndx=0x%x rect_num=%d mfd ndx=%d\n",
			pipe_ndx, rect_num, mfd->index);
		goto end;
	}

	mutex_lock(&mdp5_data->list_lock);
	list_add(&pipe->list, &mdp5_data->pipes_used);
	mutex_unlock(&mdp5_data->list_lock);

end:
	/* any successfully acquired pipe is clean and needs reprogramming */
	if (!IS_ERR_OR_NULL(pipe)) {
		pipe->dirty = false;
		pipe->params_changed++;
	}
	return pipe;
}
1111
1112/*
1113 * __is_sd_state_valid() - validate secure display state
1114 *
1115 * This function checks if the current state of secrure display is valid,
1116 * based on the new settings.
1117 * For command mode panels, the sd state would be invalid if a non secure pipe
1118 * comes and one of the below condition is met:
1119 * 1) Secure Display is enabled for current client, and there is other
1120 secure client.
1121 * 2) Secure Display is disabled for current client, and there is other
1122 secure client.
1123 * 3) Secure pipes are already staged for the current client.
1124 * For other panels, the sd state would be invalid if a non secure pipe comes
1125 * and one of the below condition is met:
1126 * 1) Secure Display is enabled for current or other client.
1127 * 2) Secure pipes are already staged for the current client.
1128 *
1129 */
1130static inline bool __is_sd_state_valid(uint32_t sd_pipes, uint32_t nonsd_pipes,
1131 int panel_type, u32 sd_enabled)
1132{
1133 if (panel_type == MIPI_CMD_PANEL) {
1134 if ((((mdss_get_sd_client_cnt() > 1) && sd_enabled) ||
1135 (mdss_get_sd_client_cnt() && !sd_enabled) ||
1136 sd_pipes)
1137 && nonsd_pipes)
1138 return false;
1139 } else {
1140 if ((sd_pipes || mdss_get_sd_client_cnt()) && nonsd_pipes)
1141 return false;
1142 }
1143 return true;
1144}
1145
/*
 * __validate_secure_display() - validate secure display
 * @mdp5_data: per-FB overlay private data
 *
 * This function travers through used pipe list and checks if any pipe
 * is with secure display enabled flag. It fails if client tries to stage
 * unsecure content with secure display session. As a side effect it also
 * records the secure-display transition (none / to-secure / to-non-secure)
 * in mdp5_data->sd_transition_state for the following kickoff.
 *
 * Return: 0 on success, -EINVAL when the mix of secure and non-secure
 * pipes is not allowed for the panel type.
 */
static int __validate_secure_display(struct mdss_overlay_private *mdp5_data)
{
	struct mdss_mdp_pipe *pipe, *tmp;
	uint32_t sd_pipes = 0, nonsd_pipes = 0;
	int panel_type = mdp5_data->ctl->panel_data->panel_info.type;
	int ret = 0;

	/* count secure vs non-secure pipes currently staged */
	mutex_lock(&mdp5_data->list_lock);
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
		if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
			sd_pipes++;
		else
			nonsd_pipes++;
	}
	mutex_unlock(&mdp5_data->list_lock);

	pr_debug("pipe count:: secure display:%d non-secure:%d\n",
		sd_pipes, nonsd_pipes);

	mdp5_data->sd_transition_state = SD_TRANSITION_NONE;
	if (!__is_sd_state_valid(sd_pipes, nonsd_pipes, panel_type,
		mdp5_data->sd_enabled)) {
		pr_err("non-secure layer validation request during secure display session\n");
		pr_err(" secure client cnt:%d secure pipe cnt:%d non-secure pipe cnt:%d\n",
			mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes);
		ret = -EINVAL;
	} else if (!mdp5_data->sd_enabled && sd_pipes) {
		/* first secure pipe staged: entering secure display */
		mdp5_data->sd_transition_state =
			SD_TRANSITION_NON_SECURE_TO_SECURE;
	} else if (mdp5_data->sd_enabled && !sd_pipes) {
		/* last secure pipe gone: leaving secure display */
		mdp5_data->sd_transition_state =
			SD_TRANSITION_SECURE_TO_NON_SECURE;
	}
	return ret;
}
1189
/*
 * __handle_free_list() - updates free pipe list
 * @mdp5_data: per-FB overlay private data
 * @validate_info_list: per-layer validation info for this cycle (may be
 *                      NULL only when @layer_count is 0)
 * @layer_count: number of entries in @validate_info_list
 *
 * This function travers through used pipe list and checks if any pipe
 * is not staged in current validation cycle. It moves the pipe to cleanup
 * list if no layer is attached for that pipe.
 *
 * This should be called after validation is successful for current cycle.
 * Moving pipes before can affects staged pipe for previous cycle.
 */
static void __handle_free_list(struct mdss_overlay_private *mdp5_data,
	struct mdss_mdp_validate_info_t *validate_info_list, u32 layer_count)
{
	int i;
	struct mdp_input_layer *layer;
	struct mdss_mdp_validate_info_t *vinfo;
	struct mdss_mdp_pipe *pipe, *tmp;

	mutex_lock(&mdp5_data->list_lock);
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
		/* look for a layer that claims this pipe (ndx + rect) */
		for (i = 0; i < layer_count; i++) {
			vinfo = &validate_info_list[i];
			layer = vinfo->layer;

			if ((pipe->ndx == layer->pipe_ndx) &&
			    (pipe->multirect.num == vinfo->multirect.num))
				break;
		}

		/*
		 * if validate cycle is not attaching any layer for this
		 * pipe then move it to cleanup list. It does overlay_unset
		 * task.
		 */
		if (i == layer_count)
			list_move(&pipe->list, &mdp5_data->pipes_cleanup);
	}
	mutex_unlock(&mdp5_data->list_lock);
}
1229
1230static bool __multirect_validate_flip(struct mdp_input_layer **layers,
1231 size_t count)
1232{
1233 /* not supporting more than 2 layers */
1234 if (count != 2)
1235 return false;
1236
1237 /* flip related validation */
1238 if ((layers[0]->flags & MDP_LAYER_FLIP_LR) ||
1239 (layers[1]->flags & MDP_LAYER_FLIP_LR)) {
1240 pr_err("multirect and HFLIP is not allowed. input layer flags=0x%x paired layer flags=0x%x\n",
1241 layers[0]->flags, layers[1]->flags);
1242 return false;
1243 }
1244 if ((layers[0]->flags & MDP_LAYER_FLIP_UD) !=
1245 (layers[1]->flags & MDP_LAYER_FLIP_UD)) {
1246 pr_err("multirect VLFIP mismatch is not allowed\n");
1247 return false;
1248 }
1249
1250 return true;
1251}
1252
/*
 * __multirect_validate_format() - format constraints for a multirect pair
 * @layers: the two layers sharing one SSPP
 * @count: number of entries in @layers; only 2 is supported
 *
 * Both rectangles must resolve to valid, non-YUV formats with the same
 * fetch mode. UBWC requires identical format params; linear formats need
 * only matching bpp and 8/10-bit packing. Solid-fill must be set on both
 * or neither.
 *
 * Return: true when the pair of formats is compatible, false otherwise.
 */
static bool __multirect_validate_format(struct mdp_input_layer **layers,
	size_t count)
{
	struct mdss_mdp_format_params *rec0_fmt, *rec1_fmt;
	bool is_ubwc;

	/* not supporting more than 2 layers */
	if (count != 2)
		return false;

	/* format related validation */
	rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
	if (!rec0_fmt) {
		pr_err("invalid input layer format %d\n",
			layers[0]->buffer.format);
		return false;
	}
	rec1_fmt = mdss_mdp_get_format_params(layers[1]->buffer.format);
	if (!rec1_fmt) {
		pr_err("invalid paired layer format %d\n",
			layers[1]->buffer.format);
		return false;
	}
	if (rec0_fmt->is_yuv || rec1_fmt->is_yuv) {
		pr_err("multirect on YUV format is not supported. input=%d paired=%d\n",
			rec0_fmt->is_yuv, rec1_fmt->is_yuv);
		return false;
	}
	if (rec0_fmt->fetch_mode != rec1_fmt->fetch_mode) {
		pr_err("multirect fetch_mode mismatch is not allowed. input=%d paired=%d\n",
			rec0_fmt->fetch_mode, rec1_fmt->fetch_mode);
		return false;
	}
	is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
	/*
	 * UBWC demands identical format params; identical params make the
	 * following bpp/dx comparisons trivially pass, so the else-if chain
	 * effectively applies the bpp and 10-bit checks to linear formats.
	 */
	if (is_ubwc && (rec0_fmt != rec1_fmt)) {
		pr_err("multirect UBWC format mismatch is not allowed\n");
		return false;
	} else if (rec0_fmt->bpp != rec1_fmt->bpp) {
		pr_err("multirect linear format bpp mismatch is not allowed. input=%d paired=%d\n",
			rec0_fmt->bpp, rec1_fmt->bpp);
		return false;
	} else if (rec0_fmt->unpack_dx_format != rec1_fmt->unpack_dx_format) {
		pr_err("multirect linear format 10bit vs 8bit mismatch is not allowed. input=%d paired=%d\n",
			rec0_fmt->unpack_dx_format, rec1_fmt->unpack_dx_format);
		return false;
	}

	if ((layers[0]->flags & MDP_LAYER_SOLID_FILL) !=
	    (layers[1]->flags & MDP_LAYER_SOLID_FILL)) {
		pr_err("solid fill mismatch between multirect layers\n");
		return false;
	}

	return true;
}
1308
1309static bool __multirect_validate_rects(struct mdp_input_layer **layers,
1310 size_t count)
1311{
1312 struct mdss_rect dst[MDSS_MDP_PIPE_MAX_RECTS];
1313 int i;
1314
1315 /* not supporting more than 2 layers */
1316 if (count != 2)
1317 return false;
1318
1319 for (i = 0; i < count; i++) {
1320 if ((layers[i]->src_rect.w != layers[i]->dst_rect.w) ||
1321 (layers[i]->src_rect.h != layers[i]->dst_rect.h)) {
1322 pr_err("multirect layers cannot have scaling: src: %dx%d dst: %dx%d\n",
1323 layers[i]->src_rect.w, layers[i]->src_rect.h,
1324 layers[i]->dst_rect.w, layers[i]->dst_rect.h);
1325 return false;
1326 }
1327
1328 dst[i] = (struct mdss_rect) {layers[i]->dst_rect.x,
1329 layers[i]->dst_rect.y,
1330 layers[i]->dst_rect.w,
1331 layers[i]->dst_rect.h};
1332 }
1333
1334 /* resolution related validation */
1335 if (mdss_rect_overlap_check(&dst[0], &dst[1])) {
1336 pr_err("multirect dst overlap is not allowed. input: %d,%d,%d,%d paired %d,%d,%d,%d\n",
1337 dst[0].x, dst[0].y, dst[0].w, dst[0].y,
1338 dst[1].x, dst[1].y, dst[1].w, dst[1].y);
1339 return false;
1340 }
1341
1342 return true;
1343}
1344
1345static bool __multirect_validate_properties(struct mdp_input_layer **layers,
1346 size_t count)
1347{
1348 /* not supporting more than 2 layers */
1349 if (count != 2)
1350 return false;
1351
1352 if ((layers[0]->flags & MDP_LAYER_ASYNC) ||
1353 (layers[1]->flags & MDP_LAYER_ASYNC)) {
1354 pr_err("ASYNC update is not allowed with multirect\n");
1355 return false;
1356 }
1357
1358 if (layers[0]->z_order == layers[1]->z_order) {
1359 pr_err("multirect layers cannot have same z_order=%d\n",
1360 layers[0]->z_order);
1361 return false;
1362 }
1363
1364 return true;
1365}
1366
/*
 * Table of pairwise multirect checks run by __validate_multirect();
 * each entry takes the two candidate layers and returns true on success.
 */
static bool (*__multirect_validators[])(struct mdp_input_layer **layers,
	size_t count) = {
	__multirect_validate_flip,
	__multirect_validate_format,
	__multirect_validate_rects,
	__multirect_validate_properties,
};
1374
1375static inline int __multirect_layer_flags_to_mode(u32 flags)
1376{
1377 int mode;
1378
1379 if (flags & MDP_LAYER_MULTIRECT_ENABLE) {
1380 if (flags & MDP_LAYER_MULTIRECT_PARALLEL_MODE)
1381 mode = MDSS_MDP_PIPE_MULTIRECT_PARALLEL;
1382 else
1383 mode = MDSS_MDP_PIPE_MULTIRECT_SERIAL;
1384 } else {
1385 if (flags & MDP_LAYER_MULTIRECT_PARALLEL_MODE) {
1386 pr_err("Invalid parallel mode flag set without multirect enabled\n");
1387 return -EINVAL;
1388 }
1389
1390 mode = MDSS_MDP_PIPE_MULTIRECT_NONE;
1391 }
1392 return mode;
1393}
1394
1395static int __multirect_validate_mode(struct msm_fb_data_type *mfd,
1396 struct mdp_input_layer **layers,
1397 size_t count)
1398{
1399 struct mdss_data_type *mdata = mfd_to_mdata(mfd);
1400 struct mdss_mdp_format_params *rec0_fmt;
1401 bool is_ubwc;
1402 int i, mode;
1403 struct mdp_rect *dst[MDSS_MDP_PIPE_MAX_RECTS];
1404
1405 /* not supporting more than 2 layers */
1406 if (count != 2)
1407 return false;
1408
1409 for (i = 0; i < count; i++)
1410 dst[i] = &layers[i]->dst_rect;
1411
1412 mode = __multirect_layer_flags_to_mode(layers[0]->flags);
1413
1414 /* format related validation */
1415 rec0_fmt = mdss_mdp_get_format_params(layers[0]->buffer.format);
1416 if (!rec0_fmt) {
1417 pr_err("invalid input layer format %d\n",
1418 layers[0]->buffer.format);
1419 return false;
1420 }
1421
1422 is_ubwc = mdss_mdp_is_ubwc_format(rec0_fmt);
1423
1424 if (mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL) {
1425 int threshold, yoffset;
1426
1427 if (dst[0]->y < dst[1]->y)
1428 yoffset = dst[1]->y - (dst[0]->y + dst[0]->h);
1429 else if (dst[1]->y < dst[0]->y)
1430 yoffset = dst[0]->y - (dst[1]->y + dst[1]->h);
1431 else
1432 yoffset = 0;
1433
1434 /*
1435 * time multiplexed is possible only if the y position of layers
1436 * is not overlapping and there is sufficient time to buffer
1437 * 2 lines/tiles. Otherwise use parallel fetch mode
1438 */
1439 threshold = 2;
1440 if (is_ubwc) {
1441 struct mdss_mdp_format_params_ubwc *uf;
1442
1443 /* in ubwc all layers would need to be same format */
1444 uf = (struct mdss_mdp_format_params_ubwc *)rec0_fmt;
1445 threshold *= uf->micro.tile_height;
1446 }
1447
1448 if (yoffset < threshold) {
1449 pr_err("Unable to operate in serial fetch mode with yoffset=%d dst[0]=%d,%d dst[1]=%d,%d\n",
1450 yoffset, dst[0]->y, dst[0]->h,
1451 dst[1]->y, dst[1]->h);
1452 return -EINVAL;
1453 }
1454 } else if (mode == MDSS_MDP_PIPE_MULTIRECT_PARALLEL) {
1455 u32 left_lm_w, rec0_mixer, rec1_mixer;
1456
1457 /*
1458 * For UBWC, 5 lines worth of buffering is needed in to meet
1459 * the performance which requires 2560w*4bpp*5lines = 50KB,
1460 * where 2560 is max width. Now let's say pixel ram is fixed to
1461 * 50KB then in UBWC parellel fetch, maximum width of each
1462 * rectangle would be 2560/2 = 1280.
1463 *
1464 * For Linear, this restriction is avoided because maximum
1465 * buffering of 2 lines is enough which yields to
1466 * 2560w*4bpp*2lines=20KB. Based on this, we can have 2 max
1467 * width rectangles in parrellel fetch mode.
1468 */
1469 if (is_ubwc &&
1470 ((dst[0]->w > (mdata->max_mixer_width / 2)) ||
1471 (dst[1]->w > (mdata->max_mixer_width / 2)))) {
1472 pr_err("in UBWC multirect parallel mode, max dst_w cannot be greater than %d. rec0_w=%d rec1_w=%d\n",
1473 mdata->max_mixer_width / 2,
1474 dst[0]->w, dst[1]->w);
1475 return -EINVAL;
1476 }
1477
1478 left_lm_w = left_lm_w_from_mfd(mfd);
1479 if (dst[0]->x < left_lm_w) {
1480 if (dst[0]->w > (left_lm_w - dst[0]->x)) {
1481 pr_err("multirect parallel mode, rec0 dst (%d,%d) cannot cross lm boundary (%d)\n",
1482 dst[0]->x, dst[0]->w, left_lm_w);
1483 return -EINVAL;
1484 }
1485 rec0_mixer = MDSS_MDP_MIXER_MUX_LEFT;
1486 } else {
1487 rec0_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
1488 }
1489
1490 if (dst[1]->x < left_lm_w) {
1491 if (dst[0]->w > (left_lm_w - dst[0]->x)) {
1492 pr_err("multirect parallel mode, rec1 dst (%d,%d) cannot cross lm boundary (%d)\n",
1493 dst[1]->x, dst[1]->w, left_lm_w);
1494 return -EINVAL;
1495 }
1496 rec1_mixer = MDSS_MDP_MIXER_MUX_LEFT;
1497 } else {
1498 rec1_mixer = MDSS_MDP_MIXER_MUX_RIGHT;
1499 }
1500
1501 if (rec0_mixer != rec1_mixer) {
1502 pr_err("multirect parallel mode mixer mismatch. rec0_mix=%d rec1_mix=%d\n",
1503 rec0_mixer, rec1_mixer);
1504 return -EINVAL;
1505 }
1506 } else {
1507 pr_err("Invalid multirect mode %d\n", mode);
1508 }
1509
1510 pr_debug("layer->pndx:%d mode=%d\n", layers[0]->pipe_ndx, mode);
1511
1512 return 0;
1513}
1514
/*
 * __update_multirect_info() - group layers that share one SSPP via multirect
 * @mfd: framebuffer device (currently unused in this function)
 * @validate_info_list: per-layer validation info array to fill in
 * @layer_list: user-supplied layer array for this commit
 * @ndx: index of the layer whose multirect group is being resolved
 * @layer_cnt: total number of layers in @layer_list
 *
 * Fills validate_info_list entries for layer @ndx and every later layer
 * with the same pipe_ndx, chaining them through multirect.next and
 * assigning rect numbers in discovery order.
 *
 * Return: the number of rects grouped (1 when multirect is disabled),
 * or a negative errno on inconsistent flags / too many rects.
 */
static int __update_multirect_info(struct msm_fb_data_type *mfd,
		struct mdss_mdp_validate_info_t *validate_info_list,
		struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_validate_info_t *vinfo[MDSS_MDP_PIPE_MAX_RECTS];
	int i, ptype, max_rects, mode;
	int cnt = 1;

	mode = __multirect_layer_flags_to_mode(layer_list[ndx].flags);
	if (IS_ERR_VALUE((unsigned long)mode))
		return mode;

	pr_debug("layer #%d pipe_ndx=%d multirect mode=%d\n",
		ndx, layer_list[ndx].pipe_ndx, mode);

	/* layer @ndx always becomes RECT0 of its group */
	vinfo[0] = &validate_info_list[ndx];
	vinfo[0]->layer = &layer_list[ndx];
	vinfo[0]->multirect.mode = mode;
	vinfo[0]->multirect.num = MDSS_MDP_PIPE_RECT0;
	vinfo[0]->multirect.next = NULL;

	/* nothing to be done if multirect is disabled */
	if (mode == MDSS_MDP_PIPE_MULTIRECT_NONE)
		return cnt;

	ptype = get_pipe_type_from_ndx(layer_list[ndx].pipe_ndx);
	if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
		pr_err("invalid pipe ndx %d\n", layer_list[ndx].pipe_ndx);
		return -EINVAL;
	}

	/* hardware without multirect support reports 0; treat as 1 rect */
	max_rects = mdata->rects_per_sspp[ptype] ? : 1;

	/* scan the remaining layers for others claiming the same pipe */
	for (i = ndx + 1; i < layer_cnt; i++) {
		if (layer_list[ndx].pipe_ndx == layer_list[i].pipe_ndx) {
			if (cnt >= max_rects) {
				pr_err("more than %d layers of type %d with same pipe_ndx=%d indexes=%d %d\n",
					max_rects, ptype,
					layer_list[ndx].pipe_ndx, ndx, i);
				return -EINVAL;
			}

			mode = __multirect_layer_flags_to_mode(
					layer_list[i].flags);
			if (IS_ERR_VALUE((unsigned long)mode))
				return mode;

			/* every rect of a pipe must request the same mode */
			if (mode != vinfo[0]->multirect.mode) {
				pr_err("unable to set different multirect modes for pipe_ndx=%d (%d %d)\n",
					layer_list[ndx].pipe_ndx, ndx, i);
				return -EINVAL;
			}

			pr_debug("found matching pair for pipe_ndx=%d (%d %d)\n",
				layer_list[i].pipe_ndx, ndx, i);

			vinfo[cnt] = &validate_info_list[i];
			vinfo[cnt]->multirect.num = cnt;
			vinfo[cnt]->multirect.next = vinfo[0]->layer;
			vinfo[cnt]->multirect.mode = mode;
			vinfo[cnt]->layer = &layer_list[i];

			/* link the previous rect forward to this one */
			vinfo[cnt - 1]->multirect.next = vinfo[cnt]->layer;
			cnt++;
		}
	}

	/* multirect flagged but no partner rect found: reject */
	if (cnt == 1) {
		pr_err("multirect mode enabled but unable to find extra rects for pipe_ndx=%x\n",
			layer_list[ndx].pipe_ndx);
		return -EINVAL;
	}

	return cnt;
}
1591
/*
 * __validate_multirect() - resolve and validate a layer's multirect group
 * @mfd: framebuffer device the layers are committed against
 * @validate_info_list: per-layer validation info array (filled here)
 * @layer_list: user-supplied layer array for this commit
 * @ndx: index of the first layer of the candidate group
 * @layer_cnt: total number of layers in @layer_list
 *
 * Groups the rects via __update_multirect_info(), then runs every check
 * in __multirect_validators[] plus the fetch-mode validation on pairs.
 *
 * Return: 0 on success (including single-rect layers), negative errno
 * on any validation failure.
 */
static int __validate_multirect(struct msm_fb_data_type *mfd,
	struct mdss_mdp_validate_info_t *validate_info_list,
	struct mdp_input_layer *layer_list, int ndx, int layer_cnt)
{
	struct mdp_input_layer *layers[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
	int i, cnt, rc;

	cnt = __update_multirect_info(mfd, validate_info_list,
			layer_list, ndx, layer_cnt);
	if (IS_ERR_VALUE((unsigned long)cnt))
		return cnt;

	if (cnt <= 1) {
		/* nothing to validate in single rect mode */
		return 0;
	} else if (cnt > 2) {
		pr_err("unsupported multirect configuration, multirect cnt=%d\n",
			cnt);
		return -EINVAL;
	}

	/* exactly two rects: run all pairwise validators */
	layers[0] = validate_info_list[ndx].layer;
	layers[1] = validate_info_list[ndx].multirect.next;

	for (i = 0; i < ARRAY_SIZE(__multirect_validators); i++) {
		if (!__multirect_validators[i](layers, cnt))
			return -EINVAL;
	}

	rc = __multirect_validate_mode(mfd, layers, cnt);
	if (IS_ERR_VALUE((unsigned long)rc))
		return rc;

	return 0;
}
1627
/*
 * __validate_layers() - validate input layers
 * @mfd: Framebuffer data structure for display
 * @file: file pointer of the committing client; attached to each pipe on
 *        success so ownership can be tracked
 * @commit: Commit version-1 structure for display
 *
 * This function validates all input layers present in layer_list. In case
 * of failure, it updates the "error_code" for failed layer. It is possible
 * to find failed layer from layer_list based on "error_code". On failure,
 * pipes newly acquired this cycle are destroyed and pipes borrowed from
 * the destroy/cleanup lists are returned to the destroy list.
 */
static int __validate_layers(struct msm_fb_data_type *mfd,
	struct file *file, struct mdp_layer_commit_v1 *commit)
{
	int ret, i = 0;
	/* per-rect bitmasks of pipe indexes seen / newly-acquired / reused */
	int rec_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
	int rec_release_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
	int rec_destroy_ndx[MDSS_MDP_PIPE_MAX_RECTS] = { 0 };
	u32 left_lm_layers = 0, right_lm_layers = 0;
	u32 left_cnt = 0, right_cnt = 0;
	u32 left_lm_w = left_lm_w_from_mfd(mfd);
	u32 mixer_mux, dst_x;
	int layer_count = commit->input_layer_cnt;

	struct mdss_mdp_pipe *pipe = NULL, *tmp, *left_blend_pipe;
	struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = {0};
	struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = {0};
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);

	struct mdss_mdp_mixer *mixer = NULL;
	struct mdp_input_layer *layer, *prev_layer, *layer_list;
	struct mdss_mdp_validate_info_t *validate_info_list = NULL;
	bool is_single_layer = false, force_validate;
	enum layer_pipe_q pipe_q_type;
	enum layer_zorder_used zorder_used[MDSS_MDP_MAX_STAGE] = {0};
	enum mdss_mdp_pipe_rect rect_num;

	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
	if (ret)
		return ret;

	/* a null commit still releases unused pipes and re-checks SD state */
	if (!layer_count)
		goto validate_skip;

	layer_list = commit->input_layers;

	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
				GFP_KERNEL);
	if (!validate_info_list) {
		ret = -ENOMEM;
		goto end;
	}

	/*
	 * First pass: count layers per mixer, resolve multirect groups,
	 * and reject duplicate (pipe_ndx, rect) claims.
	 */
	for (i = 0; i < layer_count; i++) {
		if (layer_list[i].dst_rect.x >= left_lm_w)
			right_lm_layers++;
		else
			left_lm_layers++;

		if (right_lm_layers >= MAX_PIPES_PER_LM ||
		    left_lm_layers >= MAX_PIPES_PER_LM) {
			pr_err("too many pipes stagged mixer left: %d mixer right:%d\n",
				left_lm_layers, right_lm_layers);
			ret = -EINVAL;
			goto end;
		}

		/* entries already claimed by an earlier group are skipped */
		if (!validate_info_list[i].layer) {
			ret = __validate_multirect(mfd, validate_info_list,
						layer_list, i, layer_count);
			if (ret) {
				pr_err("error validating multirect config. ret=%d i=%d\n",
					ret, i);
				goto end;
			}
		}

		rect_num = validate_info_list[i].multirect.num;
		WARN_ON(rect_num >= MDSS_MDP_PIPE_MAX_RECTS);

		if (rec_ndx[rect_num] & layer_list[i].pipe_ndx) {
			pr_err("duplicate layer found pipe_ndx=%d rect=%d (0x%x)\n",
				layer_list[i].pipe_ndx, rect_num,
				rec_ndx[rect_num]);
			ret = -EINVAL;
			goto end;
		}

		rec_ndx[rect_num] |= layer_list[i].pipe_ndx;
	}

	/*
	 * Force all layers to go through full validation after
	 * dynamic resolution switch, immaterial of the configs in
	 * the layer.
	 */
	mutex_lock(&mfd->switch_lock);
	force_validate = (mfd->switch_state != MDSS_MDP_NO_UPDATE_REQUESTED);
	mutex_unlock(&mfd->switch_lock);

	/* Second pass: per-layer z-order checks, pipe assignment, config */
	for (i = 0; i < layer_count; i++) {
		enum layer_zorder_used z = LAYER_ZORDER_NONE;

		layer = &layer_list[i];
		dst_x = layer->dst_rect.x;
		left_blend_pipe = NULL;

		prev_layer = (i > 0) ? &layer_list[i - 1] : NULL;
		/*
		 * check if current layer is at same z_order as
		 * previous one, and fail if any or both are async layers,
		 * as async layers should have unique z_order.
		 *
		 * If it has same z_order and qualifies as a right blend,
		 * pass a pointer to the pipe representing previous overlay or
		 * in other terms left blend layer.
		 *
		 * Following logic of selecting left_blend has an inherent
		 * assumption that layer list is sorted on dst_x within a
		 * same z_order. Otherwise it will fail based on z_order checks.
		 */
		if (prev_layer && (prev_layer->z_order == layer->z_order)) {
			struct mdp_rect *left = &prev_layer->dst_rect;
			struct mdp_rect *right = &layer->dst_rect;

			if ((layer->flags & MDP_LAYER_ASYNC)
			    || (prev_layer->flags & MDP_LAYER_ASYNC)) {
				ret = -EINVAL;
				layer->error_code = ret;
				pr_err("async layer should have unique z_order\n");
				goto validate_exit;
			}

			/*
			 * check if layer is right blend by checking it's
			 * directly to the right.
			 */
			if (((left->x + left->w) == right->x) &&
			    (left->y == right->y) && (left->h == right->h))
				left_blend_pipe = pipe;

			/*
			 * if the layer is right at the left lm boundary and
			 * src split is not required then right blend is not
			 * required as it will lie only on the left mixer
			 */
			if (!__layer_needs_src_split(prev_layer) &&
			    ((left->x + left->w) == left_lm_w))
				left_blend_pipe = NULL;
		}

		/* determine which mixer(s) this z-order slot occupies */
		if (!is_split_lm(mfd) || __layer_needs_src_split(layer))
			z = LAYER_ZORDER_BOTH;
		else if (dst_x >= left_lm_w)
			z = LAYER_ZORDER_RIGHT;
		else if ((dst_x + layer->dst_rect.w) <= left_lm_w)
			z = LAYER_ZORDER_LEFT;
		else
			z = LAYER_ZORDER_BOTH;

		if (!left_blend_pipe && (layer->z_order >= MDSS_MDP_MAX_STAGE ||
		    (z & zorder_used[layer->z_order]))) {
			pr_err("invalid z_order=%d or already in use %x\n",
				layer->z_order, z);
			ret = -EINVAL;
			layer->error_code = ret;
			goto validate_exit;
		} else {
			zorder_used[layer->z_order] |= z;
		}

		if ((layer->dst_rect.x < left_lm_w) ||
		    __layer_needs_src_split(layer)) {
			is_single_layer = (left_lm_layers == 1);
			mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
		} else {
			is_single_layer = (right_lm_layers == 1);
			mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
		}

		/**
		 * search pipe in current used list to find if parameters
		 * are same. validation can be skipped if only buffer handle
		 * is changed.
		 */
		pipe = (force_validate) ? NULL :
				__find_layer_in_validate_q(
					&validate_info_list[i], mdp5_data);
		if (pipe) {
			if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
				right_plist[right_cnt++] = pipe;
			else
				left_plist[left_cnt++] = pipe;

			/* only post-processing config may still change */
			if (layer->flags & MDP_LAYER_PP) {
				memcpy(&pipe->pp_cfg, layer->pp_info,
					sizeof(struct mdp_overlay_pp_params));
				ret = mdss_mdp_pp_sspp_config(pipe);
				if (ret)
					pr_err("pp setup failed %d\n", ret);
				else
					pipe->params_changed++;
			}
			pipe->dirty = false;
			continue;
		}

		mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
		if (!mixer) {
			pr_err("unable to get %s mixer\n",
				(mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) ?
				"right" : "left");
			ret = -EINVAL;
			layer->error_code = ret;
			goto validate_exit;
		}

		/* shift user z_order to the hardware stage range */
		layer->z_order += MDSS_MDP_STAGE_0;
		ret = __validate_single_layer(mfd, &validate_info_list[i],
				mixer_mux);
		if (ret) {
			pr_err("layer:%d validation failed ret=%d\n", i, ret);
			layer->error_code = ret;
			goto validate_exit;
		}

		rect_num = validate_info_list[i].multirect.num;

		pipe = __assign_pipe_for_layer(mfd, mixer, layer->pipe_ndx,
				&pipe_q_type, rect_num);
		if (IS_ERR_OR_NULL(pipe)) {
			pr_err("error assigning pipe id=0x%x rc:%ld\n",
				layer->pipe_ndx, PTR_ERR(pipe));
			ret = PTR_ERR(pipe);
			layer->error_code = ret;
			goto validate_exit;
		}

		/* remember origin of the pipe for rollback on failure */
		if (pipe_q_type == LAYER_USES_NEW_PIPE_Q)
			rec_release_ndx[rect_num] |= pipe->ndx;
		if (pipe_q_type == LAYER_USES_DESTROY_PIPE_Q)
			rec_destroy_ndx[rect_num] |= pipe->ndx;

		ret = mdss_mdp_pipe_map(pipe);
		if (IS_ERR_VALUE((unsigned long)ret)) {
			pr_err("Unable to map used pipe%d ndx=%x\n",
				pipe->num, pipe->ndx);
			layer->error_code = ret;
			goto validate_exit;
		}

		if (pipe_q_type == LAYER_USES_USED_PIPE_Q) {
			/*
			 * reconfig is allowed on new/destroy pipes. Only used
			 * pipe needs this extra validation.
			 */
			ret = __validate_layer_reconfig(layer, pipe);
			if (ret) {
				pr_err("layer reconfig validation failed=%d\n",
					ret);
				mdss_mdp_pipe_unmap(pipe);
				layer->error_code = ret;
				goto validate_exit;
			}
		}

		ret = __configure_pipe_params(mfd, &validate_info_list[i], pipe,
				left_blend_pipe, is_single_layer, mixer_mux);
		if (ret) {
			pr_err("configure pipe param failed: pipe index= %d\n",
				pipe->ndx);
			mdss_mdp_pipe_unmap(pipe);
			layer->error_code = ret;
			goto validate_exit;
		}

		mdss_mdp_pipe_unmap(pipe);

		/* keep the original copy of dst_x */
		pipe->layer.dst_rect.x = layer->dst_rect.x = dst_x;

		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
			right_plist[right_cnt++] = pipe;
		else
			left_plist[left_cnt++] = pipe;

		pr_debug("id:0x%x flags:0x%x dst_x:%d\n",
			layer->pipe_ndx, layer->flags, layer->dst_rect.x);
		/* restore user-visible z_order before returning */
		layer->z_order -= MDSS_MDP_STAGE_0;
	}

	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
			right_plist, right_cnt);
	if (ret) {
		pr_err("bw validation check failed: %d\n", ret);
		goto validate_exit;
	}

validate_skip:
	__handle_free_list(mdp5_data, validate_info_list, layer_count);

	ret = __validate_secure_display(mdp5_data);

validate_exit:
	pr_debug("err=%d total_layer:%d left:%d right:%d rec0_rel_ndx=0x%x rec1_rel_ndx=0x%x rec0_destroy_ndx=0x%x rec1_destroy_ndx=0x%x processed=%d\n",
		ret, layer_count, left_lm_layers, right_lm_layers,
		rec_release_ndx[0], rec_release_ndx[1],
		rec_destroy_ndx[0], rec_destroy_ndx[1], i);
	MDSS_XLOG(rec_ndx[0], rec_ndx[1], layer_count,
		left_lm_layers, right_lm_layers,
		rec_release_ndx[0], rec_release_ndx[1],
		rec_destroy_ndx[0], rec_destroy_ndx[1], ret);
	mutex_lock(&mdp5_data->list_lock);
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
		if (IS_ERR_VALUE((unsigned long)ret)) {
			/* rollback: destroy pipes acquired this cycle ... */
			if (((pipe->ndx & rec_release_ndx[0]) &&
			     (pipe->multirect.num == 0)) ||
			    ((pipe->ndx & rec_release_ndx[1]) &&
			     (pipe->multirect.num == 1))) {
				mdss_mdp_smp_unreserve(pipe);
				pipe->params_changed = 0;
				pipe->dirty = true;
				if (!list_empty(&pipe->list))
					list_del_init(&pipe->list);
				mdss_mdp_pipe_destroy(pipe);
			} else if (((pipe->ndx & rec_destroy_ndx[0]) &&
				    (pipe->multirect.num == 0)) ||
				   ((pipe->ndx & rec_destroy_ndx[1]) &&
				    (pipe->multirect.num == 1))) {
				/*
				 * cleanup/destroy list pipes should move back
				 * to destroy list. Next/current kickoff cycle
				 * will release the pipe because validate also
				 * acquires ov_lock.
				 */
				list_move(&pipe->list,
					&mdp5_data->pipes_destroy);
			}
		} else {
			/* success: bind every used pipe to the client file */
			pipe->file = file;
			pr_debug("file pointer attached with pipe is %pK\n",
				file);
		}
	}
	mutex_unlock(&mdp5_data->list_lock);
end:
	kfree(validate_info_list);
	mutex_unlock(&mdp5_data->ov_lock);

	pr_debug("fb%d validated layers =%d\n", mfd->index, i);

	return ret;
}
1979
/*
 * __parse_frc_info() - parse frc info from userspace
 * @mdp5_data: mdss data per FB device
 * @input_frc: frc info from user space
 *
 * This function fills the FRC info of current device which will be used
 * during following kickoff. On the enable edge it initializes the FRC
 * state machine and registers a vsync handler; on the disable edge it
 * removes that handler.
 */
static void __parse_frc_info(struct mdss_overlay_private *mdp5_data,
	struct mdp_frc_info *input_frc)
{
	struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
	struct mdss_mdp_frc_fsm *frc_fsm = mdp5_data->frc_fsm;

	if (input_frc->flags & MDP_VIDEO_FRC_ENABLE) {
		struct mdss_mdp_frc_info *frc_info = &frc_fsm->frc_info;

		if (!frc_fsm->enable) {
			/* init frc_fsm when first entry */
			mdss_mdp_frc_fsm_init_state(frc_fsm);
			/* keep vsync on when FRC is enabled */
			ctl->ops.add_vsync_handler(ctl,
					&ctl->frc_vsync_handler);
		}

		/* latch the per-frame counters for the coming kickoff */
		frc_info->cur_frc.frame_cnt = input_frc->frame_cnt;
		frc_info->cur_frc.timestamp = input_frc->timestamp;
	} else if (frc_fsm->enable) {
		/* remove vsync handler when FRC is disabled */
		ctl->ops.remove_vsync_handler(ctl, &ctl->frc_vsync_handler);
	}

	/* record the new enable state for edge detection next call */
	frc_fsm->enable = input_frc->flags & MDP_VIDEO_FRC_ENABLE;

	pr_debug("frc_enable=%d\n", frc_fsm->enable);
}
2016
2017/*
2018 * mdss_mdp_layer_pre_commit() - pre commit validation for input layers
2019 * @mfd: Framebuffer data structure for display
2020 * @commit: Commit version-1 structure for display
2021 *
2022 * This function checks if layers present in commit request are already
2023 * validated or not. If there is mismatch in validate and commit layers
 * then it validates all input layers again. On successful validation, it
2025 * maps the input layer buffer and creates release/retire fences.
2026 *
2027 * This function is called from client context and can return the error.
2028 */
int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
	struct file *file, struct mdp_layer_commit_v1 *commit)
{
	int ret, i;
	int layer_count = commit->input_layer_cnt;
	bool validate_failed = false;

	struct mdss_mdp_pipe *pipe, *tmp;
	struct mdp_input_layer *layer_list;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_mdp_data *src_data[MDSS_MDP_MAX_SSPP];
	struct mdss_mdp_validate_info_t *validate_info_list;

	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl)
		return -EINVAL;

	layer_list = commit->input_layers;

	/* handle null commit */
	if (!layer_count) {
		__handle_free_list(mdp5_data, NULL, layer_count);
		/* Check for secure state transition. */
		return __validate_secure_display(mdp5_data);
	}

	/* zeroed so the !layer check below identifies unprocessed entries */
	validate_info_list = kcalloc(layer_count, sizeof(*validate_info_list),
				     GFP_KERNEL);
	if (!validate_info_list)
		return -ENOMEM;

	/*
	 * Build multirect configuration for every layer that has not yet
	 * been filled in; __update_multirect_info() may populate more than
	 * one slot per call, hence the "already set" guard.
	 */
	for (i = 0; i < layer_count; i++) {
		if (!validate_info_list[i].layer) {
			ret = __update_multirect_info(mfd, validate_info_list,
				layer_list, i, layer_count);
			if (IS_ERR_VALUE((unsigned long)ret)) {
				pr_err("error updating multirect config. ret=%d i=%d\n",
					ret, i);
				goto end;
			}
		}
	}

	/*
	 * If any layer is missing from the validate queue, the earlier
	 * atomic-validate result cannot be reused and a full re-validate
	 * of the whole commit is required.
	 */
	for (i = 0; i < layer_count; i++) {
		pipe = __find_layer_in_validate_q(&validate_info_list[i],
				mdp5_data);
		if (!pipe) {
			validate_failed = true;
			break;
		}
	}

	if (validate_failed) {
		ret = __validate_layers(mfd, file, commit);
		if (ret) {
			pr_err("__validate_layers failed. rc=%d\n", ret);
			goto end;
		}
	} else {
		/*
		 * move unassigned pipes to cleanup list since commit
		 * supports validate+commit operation.
		 */
		__handle_free_list(mdp5_data, validate_info_list, layer_count);
	}

	/*
	 * i counts the src_data[] slots actually consumed; the error unwind
	 * at map_err walks it back. Solid-fill pipes deliberately do not
	 * advance i since they carry no buffer to free.
	 * NOTE(review): assumes the pipes_used list never holds more than
	 * MDSS_MDP_MAX_SSPP non-solid-fill pipes - confirm against the
	 * validate path that populates the list.
	 */
	i = 0;

	mutex_lock(&mdp5_data->list_lock);
	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
		if (pipe->flags & MDP_SOLID_FILL) {
			src_data[i] = NULL;
			continue;
		}
		src_data[i] = __map_layer_buffer(mfd, pipe, validate_info_list,
			layer_count);
		if (IS_ERR_OR_NULL(src_data[i++])) {
			i--;	/* failed slot holds no buffer; rewind */
			mutex_unlock(&mdp5_data->list_lock);
			ret = PTR_ERR(src_data[i]);
			goto map_err;
		}
	}
	mutex_unlock(&mdp5_data->list_lock);

	ret = mdss_mdp_overlay_start(mfd);
	if (ret) {
		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
		goto map_err;
	}

	/* optional frame-rate-conversion request rides along with commit */
	if (commit->frc_info)
		__parse_frc_info(mdp5_data, commit->frc_info);

	ret = __handle_buffer_fences(mfd, commit, layer_list);

map_err:
	if (ret) {
		/* release every buffer mapped above, newest first */
		mutex_lock(&mdp5_data->list_lock);
		for (i--; i >= 0; i--)
			if (src_data[i])
				mdss_mdp_overlay_buf_free(mfd, src_data[i]);
		mutex_unlock(&mdp5_data->list_lock);
	}
end:
	kfree(validate_info_list);

	return ret;
}
2139
2140/*
2141 * mdss_mdp_layer_atomic_validate() - validate input layers
2142 * @mfd: Framebuffer data structure for display
2143 * @commit: Commit version-1 structure for display
2144 *
2145 * This function validates only input layers received from client. It
 * does not perform any validation for mdp_output_layer defined for writeback
2147 * display.
2148 */
2149int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
2150 struct file *file, struct mdp_layer_commit_v1 *commit)
2151{
2152 struct mdss_overlay_private *mdp5_data;
2153
2154 if (!mfd || !commit) {
2155 pr_err("invalid input params\n");
2156 return -EINVAL;
2157 }
2158
2159 mdp5_data = mfd_to_mdp5_data(mfd);
2160
2161 if (!mdp5_data || !mdp5_data->ctl) {
2162 pr_err("invalid input params\n");
2163 return -ENODEV;
2164 }
2165
2166 if (mdss_fb_is_power_off(mfd)) {
2167 pr_err("display interface is in off state fb:%d\n",
2168 mfd->index);
2169 return -EPERM;
2170 }
2171
2172 return __validate_layers(mfd, file, commit);
2173}
2174
int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
	struct file *file, struct mdp_layer_commit_v1 *commit)
{
	int rc, count;
	struct mdss_overlay_private *mdp5_data;
	struct mdss_mdp_wfd *wfd = NULL;
	struct mdp_output_layer *output_layer = NULL;
	struct mdss_mdp_wfd_data *data = NULL;
	struct sync_fence *fence = NULL;
	struct msm_sync_pt_data *sync_pt_data = NULL;

	if (!mfd || !commit)
		return -EINVAL;

	mdp5_data = mfd_to_mdp5_data(mfd);

	if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
		pr_err("invalid wfd state\n");
		return -ENODEV;
	}

	if (commit->output_layer) {
		wfd = mdp5_data->wfd;
		output_layer = commit->output_layer;

		/* reject userspace plane counts beyond what we can index */
		if (output_layer->buffer.plane_count > MAX_PLANES) {
			pr_err("Output buffer plane_count exceeds MAX_PLANES limit:%d\n",
				output_layer->buffer.plane_count);
			return -EINVAL;
		}

		/* register the output buffer with the writeback device */
		data = mdss_mdp_wfd_add_data(wfd, output_layer);
		if (IS_ERR_OR_NULL(data))
			return PTR_ERR(data);

		/* fence < 0 means no acquire fence was supplied */
		if (output_layer->buffer.fence >= 0) {
			fence = sync_fence_fdget(output_layer->buffer.fence);
			if (!fence) {
				pr_err("fail to get output buffer fence\n");
				rc = -EINVAL;
				goto fence_get_err;
			}
		}
	} else {
		/*
		 * A commit without an output layer is only legal when a
		 * prior validate already configured the writeback ctl.
		 */
		wfd = mdp5_data->wfd;
		if (!wfd->ctl || !wfd->ctl->wb) {
			pr_err("wfd commit with null out layer and no validate\n");
			return -EINVAL;
		}
	}

	/* map input layer buffers and set up release/retire fences */
	rc = mdss_mdp_layer_pre_commit(mfd, file, commit);
	if (rc) {
		pr_err("fail to import input layer buffers. rc=%d\n", rc);
		goto input_layer_err;
	}

	/*
	 * Hand the output acquire fence to the sync-point machinery so
	 * kickoff waits on it; ownership transfers on success.
	 */
	if (fence) {
		sync_pt_data = &mfd->mdp_sync_pt_data;
		mutex_lock(&sync_pt_data->sync_mutex);
		count = sync_pt_data->acq_fen_cnt;

		if (count >= MDP_MAX_FENCE_FD) {
			pr_err("Reached maximum possible value for fence count\n");
			mutex_unlock(&sync_pt_data->sync_mutex);
			rc = -EINVAL;
			goto input_layer_err;
		}

		sync_pt_data->acq_fen[count] = fence;
		sync_pt_data->acq_fen_cnt++;
		mutex_unlock(&sync_pt_data->sync_mutex);
	}
	return rc;

	/* error unwind: drop the fence ref, then (fallthrough) the wfd data */
input_layer_err:
	if (fence)
		sync_fence_put(fence);
fence_get_err:
	if (data)
		mdss_mdp_wfd_remove_data(wfd, data);
	return rc;
}
2258
2259int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
2260 struct file *file, struct mdp_layer_commit_v1 *commit)
2261{
2262 int rc = 0;
2263 struct mdss_overlay_private *mdp5_data;
2264 struct mdss_mdp_wfd *wfd;
2265 struct mdp_output_layer *output_layer;
2266
2267 if (!mfd || !commit)
2268 return -EINVAL;
2269
2270 mdp5_data = mfd_to_mdp5_data(mfd);
2271
2272 if (!mdp5_data || !mdp5_data->ctl || !mdp5_data->wfd) {
2273 pr_err("invalid wfd state\n");
2274 return -ENODEV;
2275 }
2276
2277 if (!commit->output_layer) {
2278 pr_err("no output layer defined\n");
2279 return -EINVAL;
2280 }
2281
2282 wfd = mdp5_data->wfd;
2283 output_layer = commit->output_layer;
2284
2285 rc = mdss_mdp_wfd_validate(wfd, output_layer);
2286 if (rc) {
2287 pr_err("fail to validate the output layer = %d\n", rc);
2288 goto validate_failed;
2289 }
2290
2291 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2292 rc = mdss_mdp_wfd_setup(wfd, output_layer);
2293 if (rc) {
2294 pr_err("fail to prepare wfd = %d\n", rc);
2295 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2296 goto validate_failed;
2297 }
2298 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2299
2300 rc = mdss_mdp_layer_atomic_validate(mfd, file, commit);
2301 if (rc) {
2302 pr_err("fail to validate the input layers = %d\n", rc);
2303 goto validate_failed;
2304 }
2305
2306validate_failed:
2307 return rc;
2308}
2309
2310int mdss_mdp_async_position_update(struct msm_fb_data_type *mfd,
2311 struct mdp_position_update *update_pos)
2312{
2313 int i, rc = 0;
2314 struct mdss_mdp_pipe *pipe = NULL;
2315 struct mdp_async_layer *layer;
2316 struct mdss_rect dst, src;
2317 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
2318 u32 flush_bits = 0, inputndx = 0;
2319
2320 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2321
2322 for (i = 0; i < update_pos->input_layer_cnt; i++) {
2323 layer = &update_pos->input_layers[i];
2324 mutex_lock(&mdp5_data->list_lock);
2325 __find_pipe_in_list(&mdp5_data->pipes_used, layer->pipe_ndx,
2326 &pipe, MDSS_MDP_PIPE_RECT0);
2327 mutex_unlock(&mdp5_data->list_lock);
2328 if (!pipe) {
2329 pr_err("invalid pipe ndx=0x%x for async update\n",
2330 layer->pipe_ndx);
2331 rc = -ENODEV;
2332 layer->error_code = rc;
2333 goto done;
2334 }
2335
2336 rc = __async_update_position_check(mfd, pipe, &layer->src,
2337 &layer->dst);
2338 if (rc) {
2339 layer->error_code = rc;
2340 goto done;
2341 }
2342
2343 src = (struct mdss_rect) {layer->src.x, layer->src.y,
2344 pipe->src.w, pipe->src.h};
2345 dst = (struct mdss_rect) {layer->dst.x, layer->dst.y,
2346 pipe->src.w, pipe->src.h};
2347
2348 pr_debug("src:{%d,%d,%d,%d}, dst:{%d,%d,%d,%d}\n",
2349 src.x, src.y, src.w, src.h,
2350 dst.x, dst.y, dst.w, dst.h);
2351
2352 mdss_mdp_pipe_position_update(pipe, &src, &dst);
2353
2354 flush_bits |= mdss_mdp_get_pipe_flush_bits(pipe);
2355 inputndx |= layer->pipe_ndx;
2356 }
2357 mdss_mdp_async_ctl_flush(mfd, flush_bits);
2358
2359done:
2360 MDSS_XLOG(inputndx, update_pos->input_layer_cnt, flush_bits, rc);
2361 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2362 return rc;
2363}
2364