blob: 0b6195d536af698492052ccb5e1f2b360d4c3c06 [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#define pr_fmt(fmt) "%s: " fmt, __func__
15
16#include <linux/errno.h>
17#include <linux/mutex.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/delay.h>
21#include <linux/sort.h>
22#include <linux/clk.h>
23#include <linux/bitmap.h>
24
25#include <soc/qcom/event_timer.h>
26#include "mdss_fb.h"
27#include "mdss_mdp.h"
28#include "mdss_mdp_trace.h"
29#include "mdss_debug.h"
30
31#define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2
32#define NUM_MIXERCFG_REGS 3
33#define MDSS_MDP_WB_OUTPUT_BPP 3
Krishna Chaitanya Devarakondab8f7c8a2017-06-30 22:31:10 +053034#define MIN_BUS_THROUGHPUT_SCALE_FACTOR 35
/*
 * Per-mixer staging state accumulated while building the layer-mixer
 * configuration, flushed to hardware in one shot.
 */
struct mdss_mdp_mixer_cfg {
	u32 config_masks[NUM_MIXERCFG_REGS];	/* LAYER/EXT/EXT2 register values */
	bool border_enabled;	/* border-fill staged on this mixer */
	bool cursor_enabled;	/* HW cursor staged on this mixer */
};
40
/*
 * Per-SSPP control wiring: CTL flush bit index plus the bit-field
 * placement of the pipe in the mixer LAYER, LAYER_EXT and LAYER_EXT2
 * registers. Field encoding follows struct mdss_mdp_hwio_cfg (declared
 * elsewhere); entries left zero mean the pipe has no field in that
 * register.
 */
static struct {
	u32 flush_bit;
	struct mdss_mdp_hwio_cfg base;
	struct mdss_mdp_hwio_cfg ext;
	struct mdss_mdp_hwio_cfg ext2;
} mdp_pipe_hwio[MDSS_MDP_MAX_SSPP] = {
	[MDSS_MDP_SSPP_VIG0]    = {  0, {  0, 3, 0 }, {  0, 1, 3 } },
	[MDSS_MDP_SSPP_VIG1]    = {  1, {  3, 3, 0 }, {  2, 1, 3 } },
	[MDSS_MDP_SSPP_VIG2]    = {  2, {  6, 3, 0 }, {  4, 1, 3 } },
	[MDSS_MDP_SSPP_VIG3]    = { 18, { 26, 3, 0 }, {  6, 1, 3 } },
	[MDSS_MDP_SSPP_RGB0]    = {  3, {  9, 3, 0 }, {  8, 1, 3 } },
	[MDSS_MDP_SSPP_RGB1]    = {  4, { 12, 3, 0 }, { 10, 1, 3 } },
	[MDSS_MDP_SSPP_RGB2]    = {  5, { 15, 3, 0 }, { 12, 1, 3 } },
	[MDSS_MDP_SSPP_RGB3]    = { 19, { 29, 3, 0 }, { 14, 1, 3 } },
	[MDSS_MDP_SSPP_DMA0]    = { 11, { 18, 3, 0 }, { 16, 1, 3 } },
	[MDSS_MDP_SSPP_DMA1]    = { 12, { 21, 3, 0 }, { 18, 1, 3 } },
	/* DMA2/DMA3 and cursors only have EXT/EXT2 register fields */
	[MDSS_MDP_SSPP_DMA2]    = { 24, .ext2 = {  0, 4, 0 } },
	[MDSS_MDP_SSPP_DMA3]    = { 25, .ext2 = {  4, 4, 0 } },
	[MDSS_MDP_SSPP_CURSOR0] = { 22, .ext  = { 20, 4, 0 } },
	[MDSS_MDP_SSPP_CURSOR1] = { 23, .ext  = { 26, 4, 0 } },
};
62
/*
 * LAYER_EXT2 bit-field placement for the second rectangle (rec1) of the
 * multirect-capable DMA pipes; rec1 only exists in the EXT2 register.
 */
static struct {
	struct mdss_mdp_hwio_cfg ext2;
} mdp_pipe_rec1_hwio[MDSS_MDP_MAX_SSPP] = {
	[MDSS_MDP_SSPP_DMA0] = { .ext2 = {  8, 4, 0 } },
	[MDSS_MDP_SSPP_DMA1] = { .ext2 = { 12, 4, 0 } },
	[MDSS_MDP_SSPP_DMA2] = { .ext2 = { 16, 4, 0 } },
	[MDSS_MDP_SSPP_DMA3] = { .ext2 = { 20, 4, 0 } },
};
71
72static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
73 struct mdss_mdp_mixer_cfg *cfg);
74
75static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
76{
77 u64 result = val;
78
79 if (val) {
Animesh Kishoredb147d62018-03-28 00:53:31 +053080 u64 temp = U64_MAX;
Sachin Bhayareeeb88892018-01-02 16:36:01 +053081
82 do_div(temp, val);
83 if (temp > numer) {
84 /* no overflow, so we can do the operation*/
85 result = (val * (u64)numer);
86 do_div(result, denom);
Animesh Kishoredb147d62018-03-28 00:53:31 +053087 } else {
88 pr_warn("Overflow, skip fudge factor\n");
Sachin Bhayareeeb88892018-01-02 16:36:01 +053089 }
90 }
91 return result;
92}
93
/* Apply @factor as numer/denom scaling to @val (see fudge_factor()). */
static inline u64 apply_fudge_factor(u64 val,
	struct mult_factor *factor)
{
	return fudge_factor(val, factor->numer, factor->denom);
}
99
/* Apply @factor inverted (denom/numer), i.e. divide out the factor. */
static inline u64 apply_inverse_fudge_factor(u64 val,
	struct mult_factor *factor)
{
	return fudge_factor(val, factor->denom, factor->numer);
}
105
106static DEFINE_MUTEX(mdss_mdp_ctl_lock);
107
108static inline u64 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
109{
110 struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
111
112 return (ctl->intf_type == MDSS_INTF_DSI) ?
113 pinfo->mipi.dsi_pclk_rate :
114 pinfo->clk_rate;
115}
116
/*
 * Apply the global MDP clock fudge factor to @rate; applied a second
 * time for video-mode panels with a small vertical back porch as an
 * underrun workaround.
 */
static inline u32 mdss_mdp_clk_fudge_factor(struct mdss_mdp_mixer *mixer,
	u32 rate)
{
	struct mdss_panel_info *pinfo = &mixer->ctl->panel_data->panel_info;

	rate = apply_fudge_factor(rate, &mdss_res->clk_factor);

	/*
	 * If the panel is video mode and its back porch period is
	 * small, the workaround of increasing mdp clk is needed to
	 * avoid underrun.
	 */
	/*
	 * NOTE(review): pinfo is the address of an embedded member and
	 * can never be NULL, so the "pinfo &&" test below is always true.
	 */
	if (mixer->ctl->is_video_mode && pinfo &&
		(pinfo->lcdc.v_back_porch < MDP_MIN_VBP))
		rate = apply_fudge_factor(rate, &mdss_res->clk_factor);

	return rate;
}
135
/* Inputs for the per-pipe prefill (latency/bandwidth) calculations. */
struct mdss_mdp_prefill_params {
	u32 smp_bytes;		/* SMP bytes available to the pipe */
	u32 xres;		/* panel active width */
	u32 src_w;		/* source rect width */
	u32 dst_w;		/* destination rect width */
	u32 src_h;		/* source rect height (post-decimation) */
	u32 dst_h;		/* destination rect height */
	u32 dst_y;		/* destination rect top */
	u32 bpp;		/* source bytes per pixel */
	u32 pnum;		/* pipe number (tracing only) */
	bool is_yuv;		/* YUV source format */
	bool is_caf;		/* CAF scale filter in use */
	bool is_fbc;		/* frame buffer compression enabled */
	bool is_bwc;		/* bandwidth compression enabled */
	bool is_tile;		/* tiled (macro-tile) format */
	bool is_hflip;		/* horizontal flip enabled */
	bool is_cmd;		/* command-mode interface */
	bool is_ubwc;		/* universal BWC format */
	bool is_nv12;		/* NV12 format */
};
156
157static inline bool mdss_mdp_perf_is_caf(struct mdss_mdp_pipe *pipe)
158{
159 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
160
161 /*
162 * CAF mode filter is enabled when format is yuv and
163 * upscaling. Post processing had the decision to use CAF
164 * under these conditions.
165 */
166 return ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) &&
167 pipe->src_fmt->is_yuv && ((pipe->src.h >> pipe->vert_deci) <=
168 pipe->dst.h));
169}
170
171static inline u32 mdss_mdp_calc_y_scaler_bytes(struct mdss_mdp_prefill_params
172 *params, struct mdss_prefill_data *prefill)
173{
174 u32 y_scaler_bytes = 0, y_scaler_lines = 0;
175
176 if (params->is_yuv) {
177 if (params->src_h != params->dst_h) {
178 y_scaler_lines = (params->is_caf) ?
179 prefill->y_scaler_lines_caf :
180 prefill->y_scaler_lines_bilinear;
181 /*
182 * y is src_width, u is src_width/2 and v is
183 * src_width/2, so the total is scaler_lines *
184 * src_w * 2
185 */
186 y_scaler_bytes = y_scaler_lines * params->src_w * 2;
187 }
188 } else {
189 if (params->src_h != params->dst_h) {
190 y_scaler_lines = prefill->y_scaler_lines_bilinear;
191 y_scaler_bytes = y_scaler_lines * params->src_w *
192 params->bpp;
193 }
194 }
195
196 return y_scaler_bytes;
197}
198
/*
 * Grow @latency_buf_bytes by @percentage of the SMP bytes left over
 * beyond the latency buffer, returning the aligned total.
 */
static inline u32 mdss_mdp_align_latency_buf_bytes(
	u32 latency_buf_bytes, u32 percentage,
	u32 smp_bytes)
{
	u32 aligned_bytes;

	/*
	 * NOTE(review): if smp_bytes < latency_buf_bytes this unsigned
	 * subtraction wraps to a huge value; callers appear to pass the
	 * full SMP allocation which should cover the latency size --
	 * verify against mdss_mdp_calc_latency_buf_bytes() callers.
	 */
	aligned_bytes = ((smp_bytes - latency_buf_bytes) * percentage) / 100;

	pr_debug("percentage=%d, extra_bytes(per)=%d smp_bytes=%d latency=%d\n",
		percentage, aligned_bytes, smp_bytes, latency_buf_bytes);
	return latency_buf_bytes + aligned_bytes;
}
211
/**
 * mdss_mdp_calc_latency_buf_bytes() -
 *	Get the number of bytes for the
 *	latency lines.
 * @is_yuv - true if format is yuv
 * @is_bwc - true if BWC is enabled
 * @is_tile - true if it is Tile format
 * @src_w - source rectangle width
 * @bpp - Bytes per pixel of source rectangle
 * @use_latency_buf_percentage - use an extra percentage for
 *	the latency bytes calculation.
 * @smp_bytes - size of the smp for alignment
 * @is_ubwc - true if UBWC is enabled
 * @is_nv12 - true if NV12 format is used
 * @is_hflip - true if HFLIP is enabled
 *
 * Return:
 * The amount of bytes to consider for the latency lines, where:
 *	If use_latency_buf_percentage is TRUE:
 *		Function will return the amount of bytes for the
 *			latency lines plus a percentage of the
 *			additional bytes allocated to align with the
 *			SMP size. Percentage is determined by
 *			"latency_buff_per", which can be modified
 *			through debugfs.
 *	If use_latency_buf_percentage is FALSE:
 *		Function will return only the amount of bytes
 *			for the latency lines without any
 *			extra bytes.
 */
u32 mdss_mdp_calc_latency_buf_bytes(bool is_yuv, bool is_bwc,
	bool is_tile, u32 src_w, u32 bpp, bool use_latency_buf_percentage,
	u32 smp_bytes, bool is_ubwc, bool is_nv12, bool is_hflip)
{
	u32 latency_lines = 0, latency_buf_bytes;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	/* one extra line if the HW cannot reuse the flip buffer */
	if (is_hflip && !mdata->hflip_buffer_reused)
		latency_lines = 1;

	if (is_yuv) {
		if (is_ubwc) {
			if (is_nv12)
				latency_lines += 8;
			else
				latency_lines += 4;
			latency_buf_bytes = src_w * bpp * latency_lines;
		} else if (is_bwc) {
			latency_lines += 4;
			latency_buf_bytes = src_w * bpp * latency_lines;
		} else {
			if (!mdata->hflip_buffer_reused)
				latency_lines += 1;
			else
				latency_lines = 2;
			/* multiply * 2 for the two YUV planes */
			latency_buf_bytes = mdss_mdp_align_latency_buf_bytes(
				src_w * bpp * latency_lines,
				use_latency_buf_percentage ?
				mdata->latency_buff_per : 0, smp_bytes) * 2;
		}
	} else {
		if (is_ubwc) {
			latency_lines += 4;
			latency_buf_bytes = src_w * bpp * latency_lines;
		} else if (is_tile) {
			latency_lines += 8;
			latency_buf_bytes = src_w * bpp * latency_lines;
		} else if (is_bwc) {
			latency_lines += 4;
			latency_buf_bytes = src_w * bpp * latency_lines;
		} else {
			if (!mdata->hflip_buffer_reused)
				latency_lines += 1;
			else
				latency_lines = 2;
			latency_buf_bytes = mdss_mdp_align_latency_buf_bytes(
				src_w * bpp * latency_lines,
				use_latency_buf_percentage ?
				mdata->latency_buff_per : 0, smp_bytes);
		}
	}

	return latency_buf_bytes;
}
297
/*
 * Scale @val by the vertical (src_h/dst_h) and horizontal (src_w/dst_w)
 * ratios; a zero destination dimension skips that axis.
 */
static inline u32 mdss_mdp_calc_scaling_w_h(u32 val, u32 src_h, u32 dst_h,
	u32 src_w, u32 dst_w)
{
	if (dst_h)
		val = mult_frac(val, src_h, dst_h);
	if (dst_w)
		val = mult_frac(val, src_w, dst_w);

	return val;
}
308
/*
 * Compute the prefill byte requirement of a pipe driving a video-mode
 * interface: outstanding-transaction bytes plus latency buffer, YUV
 * buffer, scaler, post-scaler, ping-pong and FBC contributions.
 */
static u32 mdss_mdp_perf_calc_pipe_prefill_video(struct mdss_mdp_prefill_params
	*params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_prefill_data *prefill = &mdata->prefill_data;
	u32 prefill_bytes = 0;
	u32 latency_buf_bytes = 0;
	u32 y_buf_bytes = 0;
	u32 y_scaler_bytes = 0;
	u32 pp_bytes = 0, pp_lines = 0;
	u32 post_scaler_bytes = 0;
	u32 fbc_bytes = 0;

	prefill_bytes = prefill->ot_bytes;

	latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(params->is_yuv,
		params->is_bwc, params->is_tile, params->src_w, params->bpp,
		true, params->smp_bytes, params->is_ubwc, params->is_nv12,
		params->is_hflip);
	prefill_bytes += latency_buf_bytes;
	pr_debug("latency_buf_bytes bw_calc=%d actual=%d\n", latency_buf_bytes,
		params->smp_bytes);

	/* extra line buffering for YUV sources */
	if (params->is_yuv)
		y_buf_bytes = prefill->y_buf_bytes;

	y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);

	prefill_bytes += y_buf_bytes + y_scaler_bytes;

	/* post-scaler contribution, scaled by the up/down-scale ratios */
	if (mdata->apply_post_scale_bytes || (params->src_h != params->dst_h) ||
			(params->src_w != params->dst_w)) {
		post_scaler_bytes = prefill->post_scaler_pixels * params->bpp;
		post_scaler_bytes = mdss_mdp_calc_scaling_w_h(post_scaler_bytes,
			params->src_h, params->dst_h, params->src_w,
			params->dst_w);
		prefill_bytes += post_scaler_bytes;
	}

	/* ping-pong buffer contribution, only for layers near the top */
	if (params->xres)
		pp_lines = DIV_ROUND_UP(prefill->pp_pixels, params->xres);
	if (params->xres && params->dst_h && (params->dst_y <= pp_lines))
		pp_bytes = ((params->src_w * params->bpp * prefill->pp_pixels /
			params->xres) * params->src_h) / params->dst_h;
	prefill_bytes += pp_bytes;

	if (params->is_fbc) {
		fbc_bytes = prefill->fbc_lines * params->bpp;
		fbc_bytes = mdss_mdp_calc_scaling_w_h(fbc_bytes, params->src_h,
			params->dst_h, params->src_w, params->dst_w);
	}
	prefill_bytes += fbc_bytes;

	trace_mdp_perf_prefill_calc(params->pnum, latency_buf_bytes,
		prefill->ot_bytes, y_buf_bytes, y_scaler_bytes, pp_lines,
		pp_bytes, post_scaler_bytes, fbc_bytes, prefill_bytes);

	pr_debug("ot=%d y_buf=%d pp_lines=%d pp=%d post_sc=%d fbc_bytes=%d\n",
		prefill->ot_bytes, y_buf_bytes, pp_lines, pp_bytes,
		post_scaler_bytes, fbc_bytes);

	return prefill_bytes;
}
372
/*
 * Compute the prefill byte requirement of a pipe driving a command-mode
 * interface. Layers starting at the first line (or second line with FBC)
 * only pay latency + FBC costs; all other layers pay the full OT /
 * latency / YUV / post-scaler budget.
 */
static u32 mdss_mdp_perf_calc_pipe_prefill_cmd(struct mdss_mdp_prefill_params
	*params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_prefill_data *prefill = &mdata->prefill_data;
	u32 prefill_bytes;
	u32 ot_bytes = 0;
	u32 latency_lines, latency_buf_bytes;
	u32 y_buf_bytes = 0;
	u32 y_scaler_bytes;
	u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0;
	u32 post_scaler_bytes = 0;

	/* y_scaler_bytes are same for the first or non first line */
	y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
	prefill_bytes = y_scaler_bytes;

	/* 1st line if fbc is not enabled and 2nd line if fbc is enabled */
	if (((params->dst_y == 0) && !params->is_fbc) ||
		((params->dst_y <= 1) && params->is_fbc)) {
		if (params->is_ubwc) {
			if (params->is_nv12)
				latency_lines = 8;
			else
				latency_lines = 4;
		} else if (params->is_bwc || params->is_tile) {
			latency_lines = 4;
		} else if (params->is_hflip) {
			latency_lines = 1;
		} else {
			latency_lines = 0;
		}
		latency_buf_bytes = params->src_w * params->bpp * latency_lines;
		prefill_bytes += latency_buf_bytes;

		/* one command line, plus one more when FBC is enabled */
		fbc_cmd_lines++;
		if (params->is_fbc)
			fbc_cmd_lines++;
		fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines;
		fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes,
			params->src_h, params->dst_h, params->src_w,
			params->dst_w);
		prefill_bytes += fbc_cmd_bytes;
	} else {
		ot_bytes = prefill->ot_bytes;
		prefill_bytes += ot_bytes;

		latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(
			params->is_yuv, params->is_bwc, params->is_tile,
			params->src_w, params->bpp, true, params->smp_bytes,
			params->is_ubwc, params->is_nv12, params->is_hflip);
		prefill_bytes += latency_buf_bytes;

		if (params->is_yuv)
			y_buf_bytes = prefill->y_buf_bytes;
		prefill_bytes += y_buf_bytes;

		if (mdata->apply_post_scale_bytes ||
			(params->src_h != params->dst_h) ||
			(params->src_w != params->dst_w)) {
			post_scaler_bytes = prefill->post_scaler_pixels *
				params->bpp;
			post_scaler_bytes = mdss_mdp_calc_scaling_w_h(
				post_scaler_bytes, params->src_h,
				params->dst_h, params->src_w,
				params->dst_w);
			prefill_bytes += post_scaler_bytes;
		}
	}

	pr_debug("ot=%d bwc=%d smp=%d y_buf=%d fbc=%d\n", ot_bytes,
		params->is_bwc, latency_buf_bytes, y_buf_bytes, fbc_cmd_bytes);

	return prefill_bytes;
}
448
/*
 * Compute the prefill byte requirement for a single-layer composition:
 * latency lines by format, scaler bytes, plus command/FBC line costs.
 */
u32 mdss_mdp_perf_calc_pipe_prefill_single(struct mdss_mdp_prefill_params
	*params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_prefill_data *prefill = &mdata->prefill_data;
	u32 prefill_bytes;
	u32 latency_lines, latency_buf_bytes;
	u32 y_scaler_bytes;
	u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0;

	if (params->is_ubwc) {
		if (params->is_nv12)
			latency_lines = 8;
		else
			latency_lines = 4;
	} else if (params->is_bwc || params->is_tile)
		/* can start processing after receiving 4 lines */
		latency_lines = 4;
	else if (params->is_hflip)
		/* need one line before reading backwards */
		latency_lines = 1;
	else
		latency_lines = 0;
	latency_buf_bytes = params->src_w * params->bpp * latency_lines;
	prefill_bytes = latency_buf_bytes;

	y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
	prefill_bytes += y_scaler_bytes;

	/* one extra line for command mode, one more for FBC */
	if (params->is_cmd)
		fbc_cmd_lines++;
	if (params->is_fbc)
		fbc_cmd_lines++;

	if (fbc_cmd_lines) {
		fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines;
		fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes,
			params->src_h, params->dst_h, params->src_w,
			params->dst_w);
		prefill_bytes += fbc_cmd_bytes;
	}

	return prefill_bytes;
}
493
/*
 * Return the SMP size in bytes for @pipe, optionally re-deriving it
 * from the block count when allocation has not happened yet.
 * Cursor pipes use no SMP and return 0.
 */
u32 mdss_mdp_perf_calc_smp_size(struct mdss_mdp_pipe *pipe,
	bool calc_smp_size)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 smp_bytes;

	if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
		return 0;

	/* Get allocated or fixed smp bytes */
	smp_bytes = mdss_mdp_smp_get_size(pipe);

	/*
	 * We need to calculate the SMP size for scenarios where
	 * allocation have not happened yet (i.e. during prepare IOCTL).
	 */
	if (calc_smp_size && !mdata->has_pixel_ram) {
		u32 calc_smp_total;

		calc_smp_total = mdss_mdp_smp_calc_num_blocks(pipe);
		calc_smp_total *= mdata->smp_mb_size;

		/*
		 * If the pipe has fixed SMPs, then we must consider
		 * the max smp size.
		 */
		if (calc_smp_total > smp_bytes)
			smp_bytes = calc_smp_total;
	}

	pr_debug("SMP size (bytes) %d for pnum=%d calc=%d\n",
		smp_bytes, pipe->num, calc_smp_size);
	WARN_ON(smp_bytes == 0);

	return smp_bytes;
}
530
531static void mdss_mdp_get_bw_vote_mode(void *data,
532 u32 mdp_rev, struct mdss_mdp_perf_params *perf,
533 enum perf_calc_vote_mode calc_mode, u32 flags)
534{
535
536 if (!data)
537 goto exit;
538
539 switch (mdp_rev) {
540 case MDSS_MDP_HW_REV_105:
541 case MDSS_MDP_HW_REV_109:
542 if (calc_mode == PERF_CALC_VOTE_MODE_PER_PIPE) {
543 struct mdss_mdp_mixer *mixer =
544 (struct mdss_mdp_mixer *)data;
545
546 if ((flags & PERF_CALC_PIPE_SINGLE_LAYER) &&
547 !mixer->rotator_mode &&
548 (mixer->type == MDSS_MDP_MIXER_TYPE_INTF))
549 set_bit(MDSS_MDP_BW_MODE_SINGLE_LAYER,
550 perf->bw_vote_mode);
551 } else if (calc_mode == PERF_CALC_VOTE_MODE_CTL) {
552 struct mdss_mdp_ctl *ctl = (struct mdss_mdp_ctl *)data;
553
554 if (ctl->is_video_mode &&
555 (ctl->mfd->split_mode == MDP_SPLIT_MODE_NONE))
556 set_bit(MDSS_MDP_BW_MODE_SINGLE_IF,
557 perf->bw_vote_mode);
558 }
559 break;
560 default:
561 break;
562 };
563
564 pr_debug("mode=0x%lx\n", *(perf->bw_vote_mode));
565
566exit:
567 return;
568}
569
/*
 * MDP clock rate needed by a QSEED3-scaled pipe: cycles for the active
 * line plus backfill cycles when the vertical downscale ratio exceeds
 * the QSEED3 limit, scaled by refresh rate and total vertical lines.
 * Ratios use PHASE_STEP_SHIFT fixed-point arithmetic.
 */
static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
	struct mdss_rect src, struct mdss_rect dst, u32 src_h,
	u32 fps, u32 v_total)
{
	u32 active_line_cycle, backfill_cycle, total_cycle;
	u64 ver_dwnscale;
	u32 active_line;
	u32 backfill_line;

	/* fixed-point vertical downscale ratio = src_h / dst.h */
	ver_dwnscale = src_h << PHASE_STEP_SHIFT;
	do_div(ver_dwnscale, dst.h);

	if (ver_dwnscale > (MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
			<< PHASE_STEP_SHIFT)) {
		active_line = MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
			<< PHASE_STEP_SHIFT;
		backfill_line = ver_dwnscale - active_line;
	} else {
		/* active line same as downscale and no backfill */
		active_line = ver_dwnscale;
		backfill_line = 0;
	}

	active_line_cycle = mult_frac(active_line, src.w,
		4) >> PHASE_STEP_SHIFT; /* 4pix/clk */
	if (active_line_cycle < dst.w)
		active_line_cycle = dst.w;

	backfill_cycle = mult_frac(backfill_line, src.w, 4) /* 4pix/clk */
		>> PHASE_STEP_SHIFT;

	total_cycle = active_line_cycle + backfill_cycle;

	pr_debug("line: active=%d backfill=%d vds=%d\n",
		active_line, backfill_line, (u32)ver_dwnscale);
	pr_debug("cycle: total=%d active=%d backfill=%d\n",
		total_cycle, active_line_cycle, backfill_cycle);

	return total_cycle * (fps * v_total);
}
610
/* True when the (decimated) source height exceeds the destination. */
static inline bool __is_vert_downscaling(u32 src_h,
	struct mdss_rect dst)
{
	return (src_h > dst.h);
}
616
Krishna Chaitanya Devarakondab8f7c8a2017-06-30 22:31:10 +0530617static inline bool __is_bus_throughput_factor_required(u32 src_h,
618 struct mdss_rect dst)
619{
620 u64 scale_factor = src_h * 10;
621
622 do_div(scale_factor, dst.h);
623 return (__is_vert_downscaling(src_h, dst) &&
624 (scale_factor >= MIN_BUS_THROUGHPUT_SCALE_FACTOR));
625}
626
/*
 * MDP clock rate required by @pipe for the given src/dst rectangles,
 * refresh rate and vertical total. Rotator, QSEED3-downscale and
 * regular paths are handled separately; BWC decode and heavy downscale
 * add extra rate, and the caller may request the clock fudge factor.
 */
static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
	struct mdss_rect src, struct mdss_rect dst,
	u32 fps, u32 v_total, u32 flags)
{
	struct mdss_mdp_mixer *mixer;
	u32 rate, src_h;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	/*
	 * when doing vertical decimation lines will be skipped, hence there is
	 * no need to account for these lines in MDP clock or request bus
	 * bandwidth to fetch them.
	 */
	mixer = pipe->mixer_left;
	src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);

	if (mixer->rotator_mode) {

		rate = pipe->src.w * pipe->src.h * fps;
		rate /= 4; /* block mode fetch at 4 pix/clk */
	} else if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map) &&
			pipe->scaler.enable && __is_vert_downscaling(src_h, dst)) {

		rate = __calc_qseed3_mdp_clk_rate(pipe, src, dst, src_h,
			fps, v_total);
	} else {

		rate = dst.w;
		if (src_h > dst.h)
			rate = (rate * src_h) / dst.h;

		rate *= v_total * fps;

		/* pipes decoding BWC content have different clk requirement */
		if (pipe->bwc_mode && !pipe->src_fmt->is_yuv &&
			pipe->src_fmt->bpp == 4) {
			u32 bwc_rate =
			mult_frac((src.w * src_h * fps), v_total, dst.h << 1);
			pr_debug("src: w:%d h:%d fps:%d vtotal:%d dst h:%d\n",
				src.w, src_h, fps, v_total, dst.h);
			pr_debug("pipe%d: bwc_rate=%d normal_rate=%d\n",
				pipe->num, bwc_rate, rate);
			rate = max(bwc_rate, rate);
		}
	}

	/*
	 * If the downscale factor is >= 3.5 for a 32 BPP surface,
	 * it is recommended to add a 10% bus throughput factor to
	 * the clock rate.
	 */
	if ((pipe->src_fmt->bpp == 4) &&
		__is_bus_throughput_factor_required(src_h, dst))
		rate = apply_fudge_factor(rate, &mdata->bus_throughput_factor);

	if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
		rate = mdss_mdp_clk_fudge_factor(mixer, rate);

	return rate;
}
687
688static u32 mdss_mdp_get_rotator_fps(struct mdss_mdp_pipe *pipe)
689{
690 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
691 u32 fps;
692
693 if (pipe->src.w >= 3840 || pipe->src.h >= 3840)
694 fps = ROTATOR_LOW_FRAME_RATE;
695 else if (mdata->traffic_shaper_en)
696 fps = DEFAULT_ROTATOR_FRAME_RATE;
697 else if (pipe->frame_rate)
698 fps = pipe->frame_rate;
699 else
700 fps = DEFAULT_FRAME_RATE;
701
702 pr_debug("rotator fps:%d\n", fps);
703
704 return fps;
705}
706
/*
 * Fill in refresh rate and panel geometry for perf calculations.
 *
 * NOTE(review): in rotator mode only *fps is written -- *v_total,
 * *h_total and *xres keep whatever the caller passed in; callers are
 * expected to pre-initialize them (see mdss_mdp_get_pipe_overlap_bw).
 *
 * Returns 0 on success, -EINVAL for an interface mixer with no ctl.
 */
int mdss_mdp_get_panel_params(struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_mixer *mixer, u32 *fps, u32 *v_total,
	u32 *h_total, u32 *xres)
{

	if (mixer->rotator_mode) {
		*fps = mdss_mdp_get_rotator_fps(pipe);
	} else if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
		struct mdss_panel_info *pinfo;

		if (!mixer->ctl)
			return -EINVAL;

		pinfo = &mixer->ctl->panel_data->panel_info;
		if (pinfo->type == MIPI_VIDEO_PANEL) {
			*fps = pinfo->panel_max_fps;
			*v_total = pinfo->panel_max_vtotal;
		} else {
			*fps = mdss_panel_get_framerate(pinfo,
				FPS_RESOLUTION_HZ);
			*v_total = mdss_panel_get_vtotal(pinfo);
		}
		*xres = get_panel_width(mixer->ctl);
		*h_total = mdss_panel_get_htotal(pinfo, false);

		/* pingpong split shares one interface across two panels */
		if (is_pingpong_split(mixer->ctl->mfd))
			*h_total += mdss_panel_get_htotal(
				&mixer->ctl->panel_data->next->panel_info,
				false);
	} else {
		/* writeback mixer: geometry comes from the mixer itself */
		*v_total = mixer->height;
		*xres = mixer->width;
		*h_total = mixer->width;
		*fps = DEFAULT_FRAME_RATE;
	}

	return 0;
}
745
746int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
747 struct mdss_rect *roi, u64 *quota, u64 *quota_nocr, u32 flags)
748{
749 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
750 struct mdss_mdp_mixer *mixer = pipe->mixer_left;
751 struct mdss_rect src, dst;
752 u32 v_total = 0, h_total = 0, xres = 0, src_h = 0;
753 u32 fps = DEFAULT_FRAME_RATE;
754 *quota = 0;
755 *quota_nocr = 0;
756
757 if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
758 &h_total, &xres)) {
759 pr_err(" error retreiving the panel params!\n");
760 return -EINVAL;
761 }
762
763 dst = pipe->dst;
764 src = pipe->src;
765
766 /* crop rectangles */
767 if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
768 mdss_mdp_crop_rect(&src, &dst, roi);
769
770 /*
771 * when doing vertical decimation lines will be skipped, hence there is
772 * no need to account for these lines in MDP clock or request bus
773 * bandwidth to fetch them.
774 */
775 src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
776
777 *quota = fps * src.w * src_h;
778
779 if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
780 /*
781 * with decimation, chroma is not downsampled, this means we
782 * need to allocate bw for extra lines that will be fetched
783 */
784 if (pipe->vert_deci)
785 *quota *= 2;
786 else
787 *quota = (*quota * 3) / 2;
788 else
789 *quota *= pipe->src_fmt->bpp;
790
791 if (mixer->rotator_mode) {
792 if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
793 mdata->mdss_qos_map)) {
794 /* rotator read */
795 *quota_nocr += (*quota * 2);
796 *quota = apply_comp_ratio_factor(*quota,
797 pipe->src_fmt, &pipe->comp_ratio);
798 /*
799 * rotator write: here we are using src_fmt since
800 * current implementation only supports calculate
801 * bandwidth based in the source parameters.
802 * The correct fine-tuned calculation should use
803 * destination format and destination rectangles to
804 * calculate the bandwidth, but leaving this
805 * calculation as per current support.
806 */
807 *quota += apply_comp_ratio_factor(*quota,
808 pipe->src_fmt, &pipe->comp_ratio);
809 } else {
810 *quota *= 2; /* bus read + write */
811 }
812 } else {
813
814 *quota = DIV_ROUND_UP_ULL(*quota * v_total, dst.h);
815 if (!mixer->ctl->is_video_mode)
816 *quota = DIV_ROUND_UP_ULL(*quota * h_total, xres);
817
818 *quota_nocr = *quota;
819
820 if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
821 mdata->mdss_qos_map))
822 *quota = apply_comp_ratio_factor(*quota,
823 pipe->src_fmt, &pipe->comp_ratio);
824 }
825
826
827 pr_debug("quota:%llu nocr:%llu src.w:%d src.h%d comp:[%d, %d]\n",
828 *quota, *quota_nocr, src.w, src_h, pipe->comp_ratio.numer,
829 pipe->comp_ratio.denom);
830
831 return 0;
832}
833
834static inline bool validate_comp_ratio(struct mult_factor *factor)
835{
836 return factor->numer && factor->denom;
837}
838
/*
 * Reduce @quota by the pipe's compression ratio. Only applied when the
 * QoS overhead-factor capability is set, the format is UBWC, and the
 * ratio is valid; otherwise @quota is returned unchanged.
 */
u64 apply_comp_ratio_factor(u64 quota,
	struct mdss_mdp_format_params *fmt,
	struct mult_factor *factor)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!mdata || !test_bit(MDSS_QOS_OVERHEAD_FACTOR,
			mdata->mdss_qos_map))
		return quota;

	/* apply compression ratio, only for compressed formats */
	if (mdss_mdp_is_ubwc_format(fmt) &&
		validate_comp_ratio(factor))
		quota = apply_inverse_fudge_factor(quota, factor);

	return quota;
}
856
/*
 * Vertical-back-porch factor for @ctl: lines per second (fps * v_total)
 * divided by the effective back porch (vbp + pulse width + programmable
 * fetch lines). Returns 0 for a missing ctl/panel or zero back porch.
 */
static u32 mdss_mdp_get_vbp_factor(struct mdss_mdp_ctl *ctl)
{
	u32 fps, v_total, vbp, vbp_fac;
	struct mdss_panel_info *pinfo;

	if (!ctl || !ctl->panel_data)
		return 0;

	pinfo = &ctl->panel_data->panel_info;
	fps = mdss_panel_get_framerate(pinfo,
		FPS_RESOLUTION_HZ);
	v_total = mdss_panel_get_vtotal(pinfo);
	vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
	vbp += pinfo->prg_fet;

	vbp_fac = (vbp) ? fps * v_total / vbp : 0;
	pr_debug("vbp_fac=%d vbp=%d v_total=%d\n", vbp_fac, vbp, v_total);

	return vbp_fac;
}
877
878static u32 mdss_mdp_get_vbp_factor_max(struct mdss_mdp_ctl *ctl)
879{
880 u32 vbp_max = 0;
881 int i;
882 struct mdss_data_type *mdata;
883
884 if (!ctl || !ctl->mdata)
885 return 0;
886
887 mdata = ctl->mdata;
888 for (i = 0; i < mdata->nctl; i++) {
889 struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
890 u32 vbp_fac;
891
892 /* skip command mode interfaces */
893 if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)
894 && !ctl->is_video_mode)
895 continue;
896
897 if (mdss_mdp_ctl_is_power_on(ctl)) {
898 vbp_fac = mdss_mdp_get_vbp_factor(ctl);
899 vbp_max = max(vbp_max, vbp_fac);
900 }
901 }
902
903 return vbp_max;
904}
905
/*
 * Duration of the effective vertical back porch (vbp + pulse width +
 * programmable fetch) in microseconds for @ctl; 0 if ctl/panel missing.
 */
static u32 __calc_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
{
	u32 fps, v_total, vbp, vbp_fac;
	struct mdss_panel_info *pinfo;

	if (!ctl || !ctl->panel_data)
		return 0;

	pinfo = &ctl->panel_data->panel_info;
	fps = mdss_panel_get_framerate(pinfo,
		FPS_RESOLUTION_HZ);
	v_total = mdss_panel_get_vtotal(pinfo);
	vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
	vbp += pinfo->prg_fet;

	/* vbp lines as a fraction of a full frame period, in microseconds */
	vbp_fac = mult_frac(USEC_PER_SEC, vbp, fps * v_total); /* use uS */
	pr_debug("vbp_fac=%d vbp=%d v_total=%d fps=%d\n",
		vbp_fac, vbp, v_total, fps);

	return vbp_fac;
}
927
/*
 * Minimum prefill line time (us) across all powered-on video-mode
 * controllers; 0 when there is none (or ctl/mdata is missing).
 */
static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
{
	u32 vbp_min = UINT_MAX;
	int i;
	struct mdss_data_type *mdata;

	if (!ctl || !ctl->mdata)
		return 0;

	mdata = ctl->mdata;
	for (i = 0; i < mdata->nctl; i++) {
		struct mdss_mdp_ctl *tmp_ctl = mdata->ctl_off + i;
		u32 vbp_fac;

		/* skip command mode interfaces */
		if (!tmp_ctl->is_video_mode)
			continue;

		if (mdss_mdp_ctl_is_power_on(tmp_ctl)) {
			vbp_fac = __calc_prefill_line_time_us(tmp_ctl);
			vbp_min = min(vbp_min, vbp_fac);
		}
	}

	/* no candidate found -> report 0, not UINT_MAX */
	if (vbp_min == UINT_MAX)
		vbp_min = 0;

	return vbp_min;
}
957
/*
 * Time available (us) for @pipe to prefill: the minimum vbp time over
 * all video controllers, plus an amortized share proportional to the
 * pipe's vertical start when the pipe is amortizable.
 *
 * NOTE(review): the function returns u32 but on a missing mixer it
 * returns -EINVAL, which a caller sees as a huge unsigned value
 * (making the prefill contribution tiny, see
 * mdss_mdp_apply_prefill_factor) -- confirm this is intended.
 */
static u32 mdss_mdp_calc_prefill_line_time(struct mdss_mdp_ctl *ctl,
	struct mdss_mdp_pipe *pipe)
{
	u32 prefill_us = 0;
	u32 prefill_amortized = 0;
	struct mdss_data_type *mdata;
	struct mdss_mdp_mixer *mixer;
	struct mdss_panel_info *pinfo;
	u32 fps, v_total;

	if (!ctl || !ctl->mdata)
		return 0;

	mdata = ctl->mdata;
	mixer = pipe->mixer_left;
	if (!mixer)
		return -EINVAL;

	pinfo = &ctl->panel_data->panel_info;
	fps = mdss_panel_get_framerate(pinfo,
		FPS_RESOLUTION_HZ);
	v_total = mdss_panel_get_vtotal(pinfo);

	/* calculate the minimum prefill */
	prefill_us = __get_min_prefill_line_time_us(ctl);

	/* if pipe is amortizable, add the amortized prefill contribution */
	if (mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
		prefill_amortized = mult_frac(USEC_PER_SEC, pipe->src.y,
			fps * v_total);
		prefill_us += prefill_amortized;
	}

	return prefill_us;
}
993
994static inline bool __is_multirect_high_pipe(struct mdss_mdp_pipe *pipe)
995{
996 struct mdss_mdp_pipe *next_pipe = pipe->multirect.next;
997
998 return (pipe->src.y > next_pipe->src.y);
999}
1000
/*
 * Convert raw prefill bytes into a bandwidth figure. With time-stamped
 * prefill support, divide by the available prefill time (serial
 * multirect only charges the rect closest to the origin); otherwise
 * fall back to scaling by the max vbp factor.
 */
static u64 mdss_mdp_apply_prefill_factor(u64 prefill_bw,
	struct mdss_mdp_ctl *ctl, struct mdss_mdp_pipe *pipe)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u64 total_prefill_bw;
	u32 prefill_time_us;

	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {

		/*
		 * for multi-rect serial mode, only take the contribution from
		 * pipe that belongs to the rect closest to the origin.
		 */
		if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL &&
			__is_multirect_high_pipe(pipe)) {
			total_prefill_bw = 0;
			goto exit;
		}

		prefill_time_us = mdss_mdp_calc_prefill_line_time(ctl, pipe);
		total_prefill_bw = prefill_time_us ? DIV_ROUND_UP_ULL(
			USEC_PER_SEC * prefill_bw, prefill_time_us) : 0;
	} else {
		total_prefill_bw = prefill_bw *
			mdss_mdp_get_vbp_factor_max(ctl);
	}

exit:
	return total_prefill_bw;
}
1031
/**
 * mdss_mdp_perf_calc_simplified_prefill() - simplified prefill bw for a pipe
 * @pipe:    source pipe to compute the prefill contribution for
 * @v_total: panel total vertical lines (not referenced in this body; the
 *           prefill factor helper re-derives panel timing itself)
 * @fps:     panel refresh rate in Hz (not referenced in this body)
 * @ctl:     controller the pipe is staged on
 *
 * Estimate the bandwidth needed to prefill this pipe's latency buffers,
 * based on the chipset's simplified prefill factors (extra flip-flop,
 * format and scaling line counts). Returns 0 for command mode panels,
 * which do not require prefill.
 */
u64 mdss_mdp_perf_calc_simplified_prefill(struct mdss_mdp_pipe *pipe,
	u32 v_total, u32 fps, struct mdss_mdp_ctl *ctl)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct simplified_prefill_factors *pfactors =
			&mdata->prefill_data.prefill_factors;
	u64 prefill_per_pipe = 0;
	/* baseline line count: extra flip-flop latency factor */
	u32 prefill_lines = pfactors->xtra_ff_factor;

	/* do not calculate prefill for command mode */
	if (!ctl->is_video_mode)
		goto exit;

	/* bytes fetched per source line */
	prefill_per_pipe = pipe->src.w * pipe->src_fmt->bpp;

	/* format factors: tiled (nv12 vs other) or linear layouts */
	if (mdss_mdp_is_tile_format(pipe->src_fmt)) {
		if (mdss_mdp_is_nv12_format(pipe->src_fmt))
			prefill_lines += pfactors->fmt_mt_nv12_factor;
		else
			prefill_lines += pfactors->fmt_mt_factor;
	} else {
		prefill_lines += pfactors->fmt_linear_factor;
	}

	/* scaling factors: downscale fetches more source lines per output */
	if (pipe->src.h > pipe->dst.h) {
		prefill_lines += pfactors->scale_factor;

		prefill_per_pipe = fudge_factor(prefill_per_pipe,
			DECIMATED_DIMENSION(pipe->src.h, pipe->vert_deci),
			pipe->dst.h);
	}

	prefill_per_pipe *= prefill_lines;
	prefill_per_pipe = mdss_mdp_apply_prefill_factor(prefill_per_pipe,
		ctl, pipe);

	pr_debug("pipe src: %dx%d bpp:%d\n",
		pipe->src.w, pipe->src.h, pipe->src_fmt->bpp);
	pr_debug("ff_factor:%d mt_nv12:%d mt:%d\n",
		pfactors->xtra_ff_factor,
		(mdss_mdp_is_tile_format(pipe->src_fmt) &&
		mdss_mdp_is_nv12_format(pipe->src_fmt)) ?
		pfactors->fmt_mt_nv12_factor : 0,
		mdss_mdp_is_tile_format(pipe->src_fmt) ?
		pfactors->fmt_mt_factor : 0);
	pr_debug("pipe prefill:%llu lines:%d\n",
		prefill_per_pipe, prefill_lines);

exit:
	return prefill_per_pipe;
}
1086
1087/**
1088 * mdss_mdp_perf_calc_pipe() - calculate performance numbers required by pipe
1089 * @pipe: Source pipe struct containing updated pipe params
1090 * @perf: Structure containing values that should be updated for
1091 * performance tuning
1092 * @flags: flags to determine how to perform some of the
1093 * calculations, supported flags:
1094 *
1095 * PERF_CALC_PIPE_APPLY_CLK_FUDGE:
1096 * Determine if mdp clock fudge is applicable.
1097 * PERF_CALC_PIPE_SINGLE_LAYER:
1098 * Indicate if the calculation is for a single pipe staged
1099 * in the layer mixer
1100 * PERF_CALC_PIPE_CALC_SMP_SIZE:
1101 * Indicate if the smp size needs to be calculated, this is
1102 * for the cases where SMP haven't been allocated yet, so we need
1103 * to estimate here the smp size (i.e. PREPARE IOCTL).
1104 *
1105 * Function calculates the minimum required performance calculations in order
1106 * to avoid MDP underflow. The calculations are based on the way MDP
1107 * fetches (bandwidth requirement) and processes data through MDP pipeline
1108 * (MDP clock requirement) based on frame size and scaling requirements.
1109 */
1110
1111int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
1112 struct mdss_mdp_perf_params *perf, struct mdss_rect *roi,
1113 u32 flags)
1114{
1115 struct mdss_mdp_mixer *mixer;
1116 int fps = DEFAULT_FRAME_RATE;
1117 u32 v_total = 0, src_h, xres = 0, h_total = 0;
1118 struct mdss_rect src, dst;
1119 bool is_fbc = false;
1120 struct mdss_mdp_prefill_params prefill_params;
1121 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1122 bool calc_smp_size = false;
1123
1124 if (!pipe || !perf || !pipe->mixer_left)
1125 return -EINVAL;
1126
1127 mixer = pipe->mixer_left;
1128
1129 dst = pipe->dst;
1130 src = pipe->src;
1131
1132 /*
1133 * when doing vertical decimation lines will be skipped, hence there is
1134 * no need to account for these lines in MDP clock or request bus
1135 * bandwidth to fetch them.
1136 */
1137 src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
1138
1139 if (mdss_mdp_get_panel_params(pipe, mixer, &fps, &v_total,
1140 &h_total, &xres)) {
1141 pr_err(" error retreiving the panel params!\n");
1142 return -EINVAL;
1143 }
1144
1145 if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
1146 if (!mixer->ctl)
1147 return -EINVAL;
1148 is_fbc = mixer->ctl->panel_data->panel_info.fbc.enabled;
1149 }
1150
1151 mixer->ctl->frame_rate = fps;
1152
1153 /* crop rectangles */
1154 if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
1155 mdss_mdp_crop_rect(&src, &dst, roi);
1156
1157 pr_debug("v_total=%d, xres=%d fps=%d\n", v_total, xres, fps);
1158 pr_debug("src(w,h)(%d,%d) dst(w,h)(%d,%d) dst_y=%d bpp=%d yuv=%d\n",
1159 pipe->src.w, src_h, pipe->dst.w, pipe->dst.h, pipe->dst.y,
1160 pipe->src_fmt->bpp, pipe->src_fmt->is_yuv);
1161
1162 if (mdss_mdp_get_pipe_overlap_bw(pipe, roi, &perf->bw_overlap,
1163 &perf->bw_overlap_nocr, flags))
1164 pr_err("failure calculating overlap bw!\n");
1165
1166 perf->mdp_clk_rate = get_pipe_mdp_clk_rate(pipe, src, dst,
1167 fps, v_total, flags);
1168
1169 pr_debug("bw:%llu bw_nocr:%llu clk:%d\n", perf->bw_overlap,
1170 perf->bw_overlap_nocr, perf->mdp_clk_rate);
1171
1172 if (pipe->flags & MDP_SOLID_FILL)
1173 perf->bw_overlap = 0;
1174
1175 if (mixer->ctl->intf_num == MDSS_MDP_NO_INTF ||
1176 mdata->disable_prefill ||
1177 mixer->ctl->disable_prefill ||
1178 (pipe->flags & MDP_SOLID_FILL)) {
1179 perf->prefill_bytes = 0;
1180 perf->bw_prefill = 0;
1181 goto exit;
1182 }
1183
1184 if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
1185 perf->bw_prefill = mdss_mdp_perf_calc_simplified_prefill(pipe,
1186 v_total, fps, mixer->ctl);
1187 goto exit;
1188 }
1189
1190 calc_smp_size = (flags & PERF_CALC_PIPE_CALC_SMP_SIZE) ? true : false;
1191 prefill_params.smp_bytes = mdss_mdp_perf_calc_smp_size(pipe,
1192 calc_smp_size);
1193 prefill_params.xres = xres;
1194 prefill_params.src_w = src.w;
1195 prefill_params.src_h = src_h;
1196 prefill_params.dst_w = dst.w;
1197 prefill_params.dst_h = dst.h;
1198 prefill_params.dst_y = dst.y;
1199 prefill_params.bpp = pipe->src_fmt->bpp;
1200 prefill_params.is_yuv = pipe->src_fmt->is_yuv;
1201 prefill_params.is_caf = mdss_mdp_perf_is_caf(pipe);
1202 prefill_params.is_fbc = is_fbc;
1203 prefill_params.is_bwc = pipe->bwc_mode;
1204 prefill_params.is_tile = mdss_mdp_is_tile_format(pipe->src_fmt);
1205 prefill_params.is_hflip = pipe->flags & MDP_FLIP_LR;
1206 prefill_params.is_cmd = !mixer->ctl->is_video_mode;
1207 prefill_params.pnum = pipe->num;
1208 prefill_params.is_ubwc = mdss_mdp_is_ubwc_format(pipe->src_fmt);
1209 prefill_params.is_nv12 = mdss_mdp_is_nv12_format(pipe->src_fmt);
1210
1211 mdss_mdp_get_bw_vote_mode(mixer, mdata->mdp_rev, perf,
1212 PERF_CALC_VOTE_MODE_PER_PIPE, flags);
1213
1214 if (flags & PERF_CALC_PIPE_SINGLE_LAYER)
1215 perf->prefill_bytes =
1216 mdss_mdp_perf_calc_pipe_prefill_single(&prefill_params);
1217 else if (!prefill_params.is_cmd)
1218 perf->prefill_bytes =
1219 mdss_mdp_perf_calc_pipe_prefill_video(&prefill_params);
1220 else
1221 perf->prefill_bytes =
1222 mdss_mdp_perf_calc_pipe_prefill_cmd(&prefill_params);
1223
1224exit:
1225 pr_debug("mixer=%d pnum=%d clk_rate=%u bw_overlap=%llu bw_prefill=%llu (%d) %s\n",
1226 mixer->num, pipe->num, perf->mdp_clk_rate, perf->bw_overlap,
1227 perf->bw_prefill, perf->prefill_bytes, mdata->disable_prefill ?
1228 "prefill is disabled" : "");
1229
1230 return 0;
1231}
1232
/*
 * Return 1 when the pipe span [y10, y11) overlaps the region [y00, y01),
 * 0 otherwise. Two cases: the pipe starts above the region and covers it,
 * or the pipe starts inside the region.
 */
static inline int mdss_mdp_perf_is_overlap(u32 y00, u32 y01, u32 y10, u32 y11)
{
	if (y10 < y00)
		return y11 >= y01;

	return y10 < y01;
}
1237
/*
 * Three-way u32 comparator for sort().
 *
 * The previous version returned only -1 or 0, never a positive value,
 * which is not a valid total-order comparator in general (it merely
 * happened to work with the kernel's heapsort, which only tests the
 * sign). Return the conventional negative/zero/positive triple so the
 * comparator is correct for any sort implementation.
 */
static inline int cmpu32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a;
	u32 y = *(const u32 *)b;

	return (x > y) - (x < y);
}
1242
/*
 * mdss_mdp_perf_calc_mixer() - aggregate performance numbers for one mixer
 * @mixer:     layer mixer to compute performance data for
 * @perf:      output parameters; zeroed here, then filled in
 * @pipe_list: pipes currently staged on this mixer
 * @num_pipes: number of entries in @pipe_list (must be <= MAX_PIPES_PER_LM)
 * @flags:     PERF_CALC_PIPE_* flags forwarded to the per-pipe calculation
 *
 * Computes the mixer mdp clock requirement (panel timing based, with
 * writeback/command-mode adjustments), the worst-case overlap bandwidth
 * across vertical display regions, and the accumulated prefill cost of
 * all staged pipes.
 */
static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
		struct mdss_mdp_perf_params *perf,
		struct mdss_mdp_pipe **pipe_list, int num_pipes,
		u32 flags)
{
	struct mdss_mdp_pipe *pipe;
	struct mdss_panel_info *pinfo = NULL;
	int fps = DEFAULT_FRAME_RATE;
	u32 v_total = 0, bpp = MDSS_MDP_WB_OUTPUT_BPP;
	int i;
	u32 max_clk_rate = 0;
	u64 bw_overlap_max = 0;
	u64 bw_overlap[MAX_PIPES_PER_LM] = { 0 };
	u64 bw_overlap_async = 0;
	u32 v_region[MAX_PIPES_PER_LM * 2] = { 0 };
	u32 prefill_val = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	bool apply_fudge = true;
	struct mdss_mdp_format_params *fmt = NULL;

	WARN_ON(num_pipes > MAX_PIPES_PER_LM);

	memset(perf, 0, sizeof(*perf));

	if (!mixer->rotator_mode) {
		pinfo = &mixer->ctl->panel_data->panel_info;
		/*
		 * NOTE(review): pinfo is the address of an embedded member
		 * and can never be NULL here; this check is dead code.
		 */
		if (!pinfo) {
			pr_err("pinfo is NULL\n");
			goto exit;
		}

		if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
			if (pinfo->type == MIPI_VIDEO_PANEL) {
				fps = pinfo->panel_max_fps;
				v_total = pinfo->panel_max_vtotal;
			} else {
				fps = mdss_panel_get_framerate(pinfo,
					FPS_RESOLUTION_HZ);
				v_total = mdss_panel_get_vtotal(pinfo);
			}
		} else {
			v_total = mixer->height;
		}

		/* For writeback panel, mixer type can be other than intf */
		if (pinfo->type == WRITEBACK_PANEL) {
			fmt = mdss_mdp_get_format_params(
				mixer->ctl->dst_format);
			if (fmt)
				bpp = fmt->bpp;
			/* from here on, NULL pinfo marks the writeback path */
			pinfo = NULL;
		}

		perf->mdp_clk_rate = mixer->width * v_total * fps;
		perf->mdp_clk_rate =
			mdss_mdp_clk_fudge_factor(mixer, perf->mdp_clk_rate);

		if (!pinfo) { /* perf for bus writeback */
			perf->bw_writeback =
				fps * mixer->width * mixer->height * bpp;

			if (test_bit(MDSS_QOS_OVERHEAD_FACTOR,
					mdata->mdss_qos_map))
				perf->bw_writeback = apply_comp_ratio_factor(
						perf->bw_writeback, fmt,
						&mixer->ctl->dst_comp_ratio);

		} else if (pinfo->type == MIPI_CMD_PANEL) {
			u32 dsi_transfer_rate = mixer->width * v_total;

			/* adjust transfer time from micro seconds */
			dsi_transfer_rate = mult_frac(dsi_transfer_rate,
				1000000, pinfo->mdp_transfer_time_us);

			/* clk should be fast enough to finish the transfer */
			if (dsi_transfer_rate > perf->mdp_clk_rate)
				perf->mdp_clk_rate = dsi_transfer_rate;
		}

		/*
		 * NOTE(review): pinfo may be NULL here on the writeback
		 * path; assumes is_dsc_compression() tolerates a NULL
		 * argument - confirm in its definition.
		 */
		if (is_dsc_compression(pinfo) &&
			mixer->ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE)
			perf->mdp_clk_rate *= 2;
	}

	/*
	 * In case of border color, we still need enough mdp clock
	 * to avoid under-run. Clock requirement for border color is
	 * based on mixer width.
	 */
	if (num_pipes == 0)
		goto exit;

	memset(bw_overlap, 0, sizeof(u64) * MAX_PIPES_PER_LM);
	memset(v_region, 0, sizeof(u32) * MAX_PIPES_PER_LM * 2);

	/*
	 * Apply this logic only for 8x26 to reduce clock rate
	 * for single video playback use case
	 */
	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_101)
		 && mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
		u32 npipes = 0;

		for (i = 0; i < num_pipes; i++) {
			pipe = pipe_list[i];
			if (pipe) {
				/* more than one pipe: always apply fudge */
				if (npipes) {
					apply_fudge = true;
					break;
				}
				npipes++;
				/* single rotated yuv layer: skip the fudge */
				apply_fudge = !(pipe->src_fmt->is_yuv)
					|| !(pipe->flags
					& MDP_SOURCE_ROTATED_90);
			}
		}
	}

	if (apply_fudge)
		flags |= PERF_CALC_PIPE_APPLY_CLK_FUDGE;
	if (num_pipes == 1)
		flags |= PERF_CALC_PIPE_SINGLE_LAYER;

	for (i = 0; i < num_pipes; i++) {
		struct mdss_mdp_perf_params tmp;

		memset(&tmp, 0, sizeof(tmp));

		pipe = pipe_list[i];
		if (pipe == NULL)
			continue;

		/*
		 * if is pipe used across two LMs in source split configuration
		 * then it is staged on both LMs. In such cases skip BW calc
		 * for such pipe on right LM to prevent adding BW twice.
		 */
		if (pipe->src_split_req && mixer->is_right_mixer)
			continue;

		if (mdss_mdp_perf_calc_pipe(pipe, &tmp, &mixer->roi,
				flags))
			continue;

		/* track the largest per-pipe ib vote on real-time paths */
		if (!mdss_mdp_is_nrt_ctl_path(mixer->ctl)) {
			u64 per_pipe_ib =
				test_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map) ?
				tmp.bw_overlap_nocr : tmp.bw_overlap;

			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
				per_pipe_ib);
		}

		bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
			tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);

		/*
		 * for async layers, the overlap calculation is skipped
		 * and the bandwidth is added at the end, accounting for
		 * worst case, that async layer might overlap with
		 * all the other layers.
		 */
		if (pipe->async_update) {
			bw_overlap[i] = 0;
			v_region[2*i] = 0;
			v_region[2*i + 1] = 0;
			bw_overlap_async += tmp.bw_overlap;
		} else {
			bw_overlap[i] = tmp.bw_overlap;
			v_region[2*i] = pipe->dst.y;
			v_region[2*i + 1] = pipe->dst.y + pipe->dst.h;
		}

		if (tmp.mdp_clk_rate > max_clk_rate)
			max_clk_rate = tmp.mdp_clk_rate;

		if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
			prefill_val += tmp.bw_prefill;
		else
			prefill_val += tmp.prefill_bytes;
	}

	/*
	 * Sort the v_region array so the total display area can be
	 * divided into individual regions. Check how many pipes fetch
	 * data for each region and sum them up, then the worst case
	 * of all regions is ib request.
	 */
	sort(v_region, num_pipes * 2, sizeof(u32), cmpu32, NULL);
	for (i = 1; i < num_pipes * 2; i++) {
		int j;
		u64 bw_max_region = 0;
		u32 y0, y1;

		pr_debug("v_region[%d]%d\n", i, v_region[i]);
		/* skip zero-height regions between equal boundaries */
		if (v_region[i] == v_region[i-1])
			continue;
		y0 = v_region[i-1];
		y1 = v_region[i];
		for (j = 0; j < num_pipes; j++) {
			if (!bw_overlap[j])
				continue;
			pipe = pipe_list[j];
			if (mdss_mdp_perf_is_overlap(y0, y1, pipe->dst.y,
				(pipe->dst.y + pipe->dst.h)))
				bw_max_region += bw_overlap[j];
			pr_debug("pipe%d rect%d: v[%d](%d,%d)pipe[%d](%d,%d)bw(%llu %llu)\n",
				pipe->num, pipe->multirect.num,
				i, y0, y1, j, pipe->dst.y,
				pipe->dst.y + pipe->dst.h, bw_overlap[j],
				bw_max_region);
		}
		bw_overlap_max = max(bw_overlap_max, bw_max_region);
	}

	perf->bw_overlap += bw_overlap_max + bw_overlap_async;

	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
		perf->bw_prefill += prefill_val;
	else
		perf->prefill_bytes += prefill_val;

	if (max_clk_rate > perf->mdp_clk_rate)
		perf->mdp_clk_rate = max_clk_rate;

exit:
	pr_debug("final mixer=%d video=%d clk_rate=%u bw=%llu prefill=%d mode=0x%lx\n",
		mixer->num, mixer->ctl->is_video_mode, perf->mdp_clk_rate,
		perf->bw_overlap, prefill_val,
		*(perf->bw_vote_mode));
}
1473
1474static bool is_mdp_prefetch_needed(struct mdss_panel_info *pinfo)
1475{
1476 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1477 bool enable_prefetch = false;
1478
1479 if (mdata->mdp_rev >= MDSS_MDP_HW_REV_105) {
1480 if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
1481 pinfo->lcdc.v_front_porch) < mdata->min_prefill_lines)
1482 pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
1483
1484 enable_prefetch = true;
1485
1486 if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width) >=
1487 MDSS_MDP_MAX_PREFILL_FETCH)
1488 enable_prefetch = false;
1489 } else {
1490 if ((pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width) <
1491 mdata->min_prefill_lines)
1492 pr_warn_once("low vbp may lead to display performance issues");
1493 }
1494
1495 return enable_prefetch;
1496}
1497
1498/**
1499 * mdss_mdp_get_prefetch_lines: - Number of fetch lines in vertical front porch
1500 * @pinfo: Pointer to the panel information.
1501 *
1502 * Returns the number of fetch lines in vertical front porch at which mdp
1503 * can start fetching the next frame.
1504 *
1505 * In some cases, vertical front porch is too high. In such cases limit
1506 * the mdp fetch lines as the last (25 - vbp - vpw) lines of vertical
1507 * front porch.
1508 */
int mdss_mdp_get_prefetch_lines(struct mdss_panel_info *pinfo)
{
	int prefetch_avail = 0;
	int v_total, vfp_start;
	u32 prefetch_needed;

	if (!is_mdp_prefetch_needed(pinfo))
		return 0;

	v_total = mdss_panel_get_vtotal(pinfo);
	/* line number at which the vertical front porch begins */
	vfp_start = (pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width +
			pinfo->yres);

	/* all vfp lines are candidates for prefetch */
	prefetch_avail = v_total - vfp_start;
	/*
	 * remaining fetch window not covered by vbp + vpw;
	 * is_mdp_prefetch_needed() only returns true when vbp + vpw is
	 * below MDSS_MDP_MAX_PREFILL_FETCH, so this cannot underflow.
	 */
	prefetch_needed = MDSS_MDP_MAX_PREFILL_FETCH -
		pinfo->lcdc.v_back_porch -
		pinfo->lcdc.v_pulse_width;

	/* cap the available lines to what the hardware window needs */
	if (prefetch_avail > prefetch_needed)
		prefetch_avail = prefetch_needed;

	return prefetch_avail;
}
1532
1533static bool mdss_mdp_video_mode_intf_connected(struct mdss_mdp_ctl *ctl)
1534{
1535 int i;
1536 struct mdss_data_type *mdata;
1537
1538 if (!ctl || !ctl->mdata)
1539 return 0;
1540
1541 mdata = ctl->mdata;
1542 for (i = 0; i < mdata->nctl; i++) {
1543 struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
1544
1545 if (ctl->is_video_mode && mdss_mdp_ctl_is_power_on(ctl)) {
1546 pr_debug("video interface connected ctl:%d\n",
1547 ctl->num);
1548 return true;
1549 }
1550 }
1551
1552 return false;
1553}
1554
/*
 * __mdss_mdp_perf_calc_ctl_helper() - aggregate perf numbers for a ctl
 * @ctl:         controller to compute performance data for
 * @perf:        output; zeroed here, then accumulated from both mixers
 * @left_plist:  pipes staged on the left mixer
 * @left_cnt:    number of entries in @left_plist
 * @right_plist: pipes staged on the right mixer
 * @right_cnt:   number of entries in @right_plist
 * @flags:       PERF_CALC_PIPE_* flags forwarded to the mixer calc
 *
 * Sums overlap/writeback/prefill bandwidth across both mixers, takes the
 * max of the clock rates and per-pipe ib votes, and derives the final
 * bw_ctl vote.
 */
static void __mdss_mdp_perf_calc_ctl_helper(struct mdss_mdp_ctl *ctl,
		struct mdss_mdp_perf_params *perf,
		struct mdss_mdp_pipe **left_plist, int left_cnt,
		struct mdss_mdp_pipe **right_plist, int right_cnt,
		u32 flags)
{
	struct mdss_mdp_perf_params tmp;
	struct mdss_data_type *mdata = ctl->mdata;

	memset(perf, 0, sizeof(*perf));

	if (ctl->mixer_left) {
		mdss_mdp_perf_calc_mixer(ctl->mixer_left, &tmp,
				left_plist, left_cnt, flags);

		bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
			tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);

		perf->max_per_pipe_ib = tmp.max_per_pipe_ib;
		perf->bw_overlap += tmp.bw_overlap;
		perf->mdp_clk_rate = tmp.mdp_clk_rate;
		perf->bw_writeback += tmp.bw_writeback;

		if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
			perf->bw_prefill += tmp.bw_prefill;
		else
			perf->prefill_bytes += tmp.prefill_bytes;
	}

	if (ctl->mixer_right) {
		mdss_mdp_perf_calc_mixer(ctl->mixer_right, &tmp,
				right_plist, right_cnt, flags);

		bitmap_or(perf->bw_vote_mode, perf->bw_vote_mode,
			tmp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);

		perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
			tmp.max_per_pipe_ib);
		perf->bw_overlap += tmp.bw_overlap;
		perf->bw_writeback += tmp.bw_writeback;
		if (tmp.mdp_clk_rate > perf->mdp_clk_rate)
			perf->mdp_clk_rate = tmp.mdp_clk_rate;

		if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map))
			perf->bw_prefill += tmp.bw_prefill;
		else
			perf->prefill_bytes += tmp.prefill_bytes;

		if (ctl->intf_type) {
			u64 clk_rate = mdss_mdp_get_pclk_rate(ctl);
			/* minimum clock rate due to inefficiency in 3dmux */
			clk_rate = DIV_ROUND_UP_ULL((clk_rate >> 1) * 9, 8);
			if (clk_rate > perf->mdp_clk_rate)
				perf->mdp_clk_rate = clk_rate;
		}
	}

	/* request minimum bandwidth to have bus clock on when display is on */
	if (perf->bw_overlap == 0)
		perf->bw_overlap = SZ_16M;

	if (!test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map) &&
		(ctl->intf_type != MDSS_MDP_NO_INTF)) {
		u32 vbp_fac = mdss_mdp_get_vbp_factor_max(ctl);

		perf->bw_prefill = perf->prefill_bytes;
		/*
		 * Prefill bandwidth equals the amount of data (number
		 * of prefill_bytes) divided by the amount of time
		 * available (blanking period). It is equivalent that
		 * prefill bytes times a factor in unit Hz, which is
		 * the reciprocal of time.
		 */
		perf->bw_prefill *= vbp_fac;
	}

	perf->bw_ctl = max(perf->bw_prefill, perf->bw_overlap);
	pr_debug("ctl=%d prefill bw=%llu overlap bw=%llu mode=0x%lx writeback:%llu\n",
		ctl->num, perf->bw_prefill, perf->bw_overlap,
		*(perf->bw_vote_mode), perf->bw_writeback);
}
1636
1637static u32 mdss_check_for_flip(struct mdss_mdp_ctl *ctl)
1638{
1639 u32 i, panel_orientation;
1640 struct mdss_mdp_pipe *pipe;
1641 u32 flags = 0;
1642
1643 panel_orientation = ctl->mfd->panel_orientation;
1644 if (panel_orientation & MDP_FLIP_LR)
1645 flags |= MDSS_MAX_BW_LIMIT_HFLIP;
1646 if (panel_orientation & MDP_FLIP_UD)
1647 flags |= MDSS_MAX_BW_LIMIT_VFLIP;
1648
1649 for (i = 0; i < MAX_PIPES_PER_LM; i++) {
1650 if ((flags & MDSS_MAX_BW_LIMIT_HFLIP) &&
1651 (flags & MDSS_MAX_BW_LIMIT_VFLIP))
1652 return flags;
1653
1654 if (ctl->mixer_left && ctl->mixer_left->stage_pipe[i]) {
1655 pipe = ctl->mixer_left->stage_pipe[i];
1656 if (pipe->flags & MDP_FLIP_LR)
1657 flags |= MDSS_MAX_BW_LIMIT_HFLIP;
1658 if (pipe->flags & MDP_FLIP_UD)
1659 flags |= MDSS_MAX_BW_LIMIT_VFLIP;
1660 }
1661
1662 if (ctl->mixer_right && ctl->mixer_right->stage_pipe[i]) {
1663 pipe = ctl->mixer_right->stage_pipe[i];
1664 if (pipe->flags & MDP_FLIP_LR)
1665 flags |= MDSS_MAX_BW_LIMIT_HFLIP;
1666 if (pipe->flags & MDP_FLIP_UD)
1667 flags |= MDSS_MAX_BW_LIMIT_VFLIP;
1668 }
1669 }
1670
1671 return flags;
1672}
1673
1674static int mdss_mdp_set_threshold_max_bandwidth(struct mdss_mdp_ctl *ctl)
1675{
1676 u32 mode, threshold = 0, max = INT_MAX;
1677 u32 i = 0;
1678 struct mdss_max_bw_settings *max_bw_settings =
1679 ctl->mdata->max_bw_settings;
1680
1681 if (!ctl->mdata->max_bw_settings_cnt && !ctl->mdata->max_bw_settings)
1682 return 0;
1683
1684 mode = ctl->mdata->bw_mode_bitmap;
1685
1686 if (!((mode & MDSS_MAX_BW_LIMIT_HFLIP) &&
1687 (mode & MDSS_MAX_BW_LIMIT_VFLIP)))
1688 mode |= mdss_check_for_flip(ctl);
1689
1690 pr_debug("final mode = %d, bw_mode_bitmap = %d\n", mode,
1691 ctl->mdata->bw_mode_bitmap);
1692
1693 /* Return minimum bandwidth limit */
1694 for (i = 0; i < ctl->mdata->max_bw_settings_cnt; i++) {
1695 if (max_bw_settings[i].mdss_max_bw_mode & mode) {
1696 threshold = max_bw_settings[i].mdss_max_bw_val;
1697 if (threshold < max)
1698 max = threshold;
1699 }
1700 }
1701
1702 return max;
1703}
1704
/*
 * mdss_mdp_perf_bw_check() - validate total interface bw fits the budget
 * @ctl:         controller being committed
 * @left_plist:  pipes to be staged on the left mixer
 * @left_cnt:    number of entries in @left_plist
 * @right_plist: pipes to be staged on the right mixer
 * @right_cnt:   number of entries in @right_plist
 *
 * Computes this ctl's pending bandwidth, sums it with all other
 * powered-on interface ctls, and compares against the applicable
 * threshold. Returns 0 on success or -E2BIG when the configuration
 * would exceed the bandwidth budget (bw_pending is reset in that case).
 */
int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl,
		struct mdss_mdp_pipe **left_plist, int left_cnt,
		struct mdss_mdp_pipe **right_plist, int right_cnt)
{
	struct mdss_data_type *mdata = ctl->mdata;
	struct mdss_mdp_perf_params perf;
	u32 bw, threshold, i, mode_switch, max_bw;
	u64 bw_sum_of_intfs = 0;
	bool is_video_mode;

	/* we only need bandwidth check on real-time clients (interfaces) */
	if (ctl->intf_type == MDSS_MDP_NO_INTF)
		return 0;

	__mdss_mdp_perf_calc_ctl_helper(ctl, &perf,
			left_plist, left_cnt, right_plist, right_cnt,
			PERF_CALC_PIPE_CALC_SMP_SIZE);
	ctl->bw_pending = perf.bw_ctl;

	/* accumulate the pending bandwidth of every powered-on interface */
	for (i = 0; i < mdata->nctl; i++) {
		struct mdss_mdp_ctl *temp = mdata->ctl_off + i;

		if (temp->power_state == MDSS_PANEL_POWER_ON &&
				(temp->intf_type != MDSS_MDP_NO_INTF))
			bw_sum_of_intfs += temp->bw_pending;
	}

	/* convert bandwidth to kb */
	bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
	pr_debug("calculated bandwidth=%uk\n", bw);

	/* mfd validation happens in func */
	mode_switch = mdss_fb_get_mode_switch(ctl->mfd);
	if (mode_switch)
		is_video_mode = (mode_switch == MIPI_VIDEO_PANEL);
	else
		is_video_mode = ctl->is_video_mode;
	/* video paths (or any connected video intf) use the lower budget */
	threshold = (is_video_mode ||
		mdss_mdp_video_mode_intf_connected(ctl)) ?
		mdata->max_bw_low : mdata->max_bw_high;

	max_bw = mdss_mdp_set_threshold_max_bandwidth(ctl);

	if (max_bw && (max_bw < threshold))
		threshold = max_bw;

	pr_debug("final threshold bw limit = %d\n", threshold);

	if (bw > threshold) {
		ctl->bw_pending = 0;
		pr_debug("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
		return -E2BIG;
	}

	return 0;
}
1761
1762static u32 mdss_mdp_get_max_pipe_bw(struct mdss_mdp_pipe *pipe)
1763{
1764
1765 struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
1766 struct mdss_max_bw_settings *max_per_pipe_bw_settings;
1767 u32 flags = 0, threshold = 0, panel_orientation;
1768 u32 i, max = INT_MAX;
1769
1770 if (!ctl->mdata->mdss_per_pipe_bw_cnt
1771 && !ctl->mdata->max_per_pipe_bw_settings)
1772 return 0;
1773
1774 panel_orientation = ctl->mfd->panel_orientation;
1775 max_per_pipe_bw_settings = ctl->mdata->max_per_pipe_bw_settings;
1776
1777 /* Check for panel orienatation */
1778 panel_orientation = ctl->mfd->panel_orientation;
1779 if (panel_orientation & MDP_FLIP_LR)
1780 flags |= MDSS_MAX_BW_LIMIT_HFLIP;
1781 if (panel_orientation & MDP_FLIP_UD)
1782 flags |= MDSS_MAX_BW_LIMIT_VFLIP;
1783
1784 /* check for Hflip/Vflip in pipe */
1785 if (pipe->flags & MDP_FLIP_LR)
1786 flags |= MDSS_MAX_BW_LIMIT_HFLIP;
1787 if (pipe->flags & MDP_FLIP_UD)
1788 flags |= MDSS_MAX_BW_LIMIT_VFLIP;
1789
1790 flags |= ctl->mdata->bw_mode_bitmap;
1791
1792 for (i = 0; i < ctl->mdata->mdss_per_pipe_bw_cnt; i++) {
1793 if (max_per_pipe_bw_settings[i].mdss_max_bw_mode & flags) {
1794 threshold = max_per_pipe_bw_settings[i].mdss_max_bw_val;
1795 if (threshold < max)
1796 max = threshold;
1797 }
1798 }
1799
1800 return max;
1801}
1802
/*
 * mdss_mdp_perf_bw_check_pipe() - validate a single pipe's bandwidth
 * @perf: performance numbers already calculated for this pipe
 * @pipe: pipe being validated
 *
 * Compares the pipe's worst-case bandwidth (max of prefill and overlap,
 * in kB) against the per-pipe threshold. Returns 0 when within budget
 * or when no real-time interface is involved, -E2BIG otherwise.
 */
int mdss_mdp_perf_bw_check_pipe(struct mdss_mdp_perf_params *perf,
		struct mdss_mdp_pipe *pipe)
{
	struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
	struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
	u32 vbp_fac = 0, threshold = 0;
	u64 prefill_bw, pipe_bw, max_pipe_bw;

	/* we only need bandwidth check on real-time clients (interfaces) */
	if (ctl->intf_type == MDSS_MDP_NO_INTF)
		return 0;

	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
		prefill_bw = perf->bw_prefill;
	} else {
		/* legacy model: prefill bytes scaled by max vbp factor */
		vbp_fac = mdss_mdp_get_vbp_factor_max(ctl);
		prefill_bw = perf->prefill_bytes * vbp_fac;
	}
	pipe_bw = max(prefill_bw, perf->bw_overlap);
	pr_debug("prefill=%llu, vbp_fac=%u, overlap=%llu\n",
			prefill_bw, vbp_fac, perf->bw_overlap);

	/* convert bandwidth to kb */
	pipe_bw = DIV_ROUND_UP_ULL(pipe_bw, 1000);

	threshold = mdata->max_bw_per_pipe;
	max_pipe_bw = mdss_mdp_get_max_pipe_bw(pipe);

	if (max_pipe_bw && (max_pipe_bw < threshold))
		threshold = max_pipe_bw;

	pr_debug("bw=%llu threshold=%u\n", pipe_bw, threshold);

	/* a zero threshold means per-pipe checking is disabled */
	if (threshold && pipe_bw > threshold) {
		pr_debug("pipe exceeds bandwidth: %llukb > %ukb\n", pipe_bw,
				threshold);
		return -E2BIG;
	}

	return 0;
}
1844
1845static void mdss_mdp_perf_calc_ctl(struct mdss_mdp_ctl *ctl,
1846 struct mdss_mdp_perf_params *perf)
1847{
1848 struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM];
1849 struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM];
1850 int i, left_cnt = 0, right_cnt = 0;
1851
1852 for (i = 0; i < MAX_PIPES_PER_LM; i++) {
1853 if (ctl->mixer_left && ctl->mixer_left->stage_pipe[i]) {
1854 left_plist[left_cnt] =
1855 ctl->mixer_left->stage_pipe[i];
1856 left_cnt++;
1857 }
1858
1859 if (ctl->mixer_right && ctl->mixer_right->stage_pipe[i]) {
1860 right_plist[right_cnt] =
1861 ctl->mixer_right->stage_pipe[i];
1862 right_cnt++;
1863 }
1864 }
1865
1866 __mdss_mdp_perf_calc_ctl_helper(ctl, perf,
1867 left_plist, left_cnt, right_plist, right_cnt, 0);
1868
1869 if (ctl->is_video_mode || ((ctl->intf_type != MDSS_MDP_NO_INTF) &&
1870 mdss_mdp_video_mode_intf_connected(ctl))) {
1871 perf->bw_ctl =
1872 max(apply_fudge_factor(perf->bw_overlap,
1873 &mdss_res->ib_factor_overlap),
1874 apply_fudge_factor(perf->bw_prefill,
1875 &mdss_res->ib_factor));
1876 perf->bw_writeback = apply_fudge_factor(perf->bw_writeback,
1877 &mdss_res->ib_factor);
1878 }
1879 pr_debug("ctl=%d clk_rate=%u\n", ctl->num, perf->mdp_clk_rate);
1880 pr_debug("bw_overlap=%llu bw_prefill=%llu prefill_bytes=%d\n",
1881 perf->bw_overlap, perf->bw_prefill, perf->prefill_bytes);
1882}
1883
1884static void set_status(u32 *value, bool status, u32 bit_num)
1885{
1886 if (status)
1887 *value |= BIT(bit_num);
1888 else
1889 *value &= ~BIT(bit_num);
1890}
1891
1892/**
1893 * @ mdss_mdp_ctl_perf_set_transaction_status() -
1894 * Set the status of the on-going operations
1895 * for the command mode panels.
1896 * @ctl - pointer to a ctl
1897 *
1898 * This function is called to set the status bit in the perf_transaction_status
1899 * according to the operation that it is on-going for the command mode
1900 * panels, where:
1901 *
1902 * PERF_SW_COMMIT_STATE:
1903 * 1 - If SW operation has been committed and bw
1904 * has been requested (HW transaction have not started yet).
1905 * 0 - If there is no SW operation pending
1906 * PERF_HW_MDP_STATE:
1907 * 1 - If HW transaction is on-going
1908 * 0 - If there is no HW transaction on going (ping-pong interrupt
1909 * has finished)
1910 * Only if both states are zero there are no pending operations and
1911 * BW could be released.
1912 * State can be queried calling "mdss_mdp_ctl_perf_get_transaction_status"
1913 */
void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl,
	enum mdss_mdp_perf_state_type component, bool new_status)
{
	u32 previous_transaction;
	bool previous_status;
	unsigned long flags;

	/* only command mode panels track per-transaction perf status */
	if (!ctl || !ctl->panel_data ||
		(ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
		return;

	/* perf_transaction_status is shared with interrupt context */
	spin_lock_irqsave(&ctl->spin_lock, flags);

	previous_transaction = ctl->perf_transaction_status;
	previous_status = previous_transaction & BIT(component) ?
		PERF_STATUS_BUSY : PERF_STATUS_DONE;

	/*
	 * If we set "done" state when previous state was not "busy",
	 * we want to print a warning since maybe there is a state
	 * that we are not considering
	 */
	WARN((new_status == PERF_STATUS_DONE) &&
		(previous_status != PERF_STATUS_BUSY),
		"unexpected previous state for component: %d\n", component);

	set_status(&ctl->perf_transaction_status, new_status,
		(u32)component);

	pr_debug("ctl:%d component:%d previous:%d status:%d\n",
		ctl->num, component, previous_transaction,
		ctl->perf_transaction_status);
	pr_debug("ctl:%d new_status:%d prev_status:%d\n",
		ctl->num, new_status, previous_status);

	spin_unlock_irqrestore(&ctl->spin_lock, flags);
}
1951
1952/**
1953 * @ mdss_mdp_ctl_perf_get_transaction_status() -
1954 * Get the status of the on-going operations
1955 * for the command mode panels.
1956 * @ctl - pointer to a ctl
1957 *
1958 * Return:
1959 * The status of the transactions for the command mode panels,
1960 * note that the bandwidth can be released only if all transaction
1961 * status bits are zero.
1962 */
1963u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl)
1964{
1965 unsigned long flags;
1966 u32 transaction_status;
1967
1968 if (!ctl)
1969 return PERF_STATUS_BUSY;
1970
1971 /*
1972 * If Rotator mode and bandwidth has been released; return STATUS_DONE
1973 * so the bandwidth is re-calculated.
1974 */
1975 if (ctl->mixer_left && ctl->mixer_left->rotator_mode &&
1976 !ctl->perf_release_ctl_bw)
1977 return PERF_STATUS_DONE;
1978
1979 /*
1980 * If Video Mode or not valid data to determine the status, return busy
1981 * status, so the bandwidth cannot be freed by the caller
1982 */
1983 if (!ctl || !ctl->panel_data ||
1984 (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)) {
1985 return PERF_STATUS_BUSY;
1986 }
1987
1988 spin_lock_irqsave(&ctl->spin_lock, flags);
1989 transaction_status = ctl->perf_transaction_status;
1990 spin_unlock_irqrestore(&ctl->spin_lock, flags);
1991 pr_debug("ctl:%d status:%d\n", ctl->num,
1992 transaction_status);
1993
1994 return transaction_status;
1995}
1996
1997/**
1998 * @ mdss_mdp_ctl_perf_update_traffic_shaper_bw -
1999 * Apply BW fudge factor to rotator
2000 * if mdp clock increased during
2001 * rotation session.
2002 * @ctl - pointer to the controller
2003 * @mdp_clk - new mdp clock
2004 *
2005 * If mdp clock increased and traffic shaper is enabled, we need to
2006 * account for the additional bandwidth that will be requested by
2007 * the rotator when running at a higher clock, so we apply a fudge
2008 * factor proportional to the mdp clock increment.
2009 */
2010static void mdss_mdp_ctl_perf_update_traffic_shaper_bw(struct mdss_mdp_ctl *ctl,
2011 u32 mdp_clk)
2012{
2013 if ((mdp_clk > 0) && (mdp_clk > ctl->traffic_shaper_mdp_clk)) {
2014 ctl->cur_perf.bw_ctl = fudge_factor(ctl->cur_perf.bw_ctl,
2015 mdp_clk, ctl->traffic_shaper_mdp_clk);
2016 pr_debug("traffic shaper bw:%llu, clk: %d, mdp_clk:%d\n",
2017 ctl->cur_perf.bw_ctl, ctl->traffic_shaper_mdp_clk,
2018 mdp_clk);
2019 }
2020}
2021
2022static u64 mdss_mdp_ctl_calc_client_vote(struct mdss_data_type *mdata,
2023 struct mdss_mdp_perf_params *perf, bool nrt_client, u32 mdp_clk)
2024{
2025 u64 bw_sum_of_intfs = 0;
2026 int i;
2027 struct mdss_mdp_ctl *ctl;
2028 struct mdss_mdp_mixer *mixer;
2029 struct mdss_mdp_perf_params perf_temp;
2030
2031 bitmap_zero(perf_temp.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
2032
2033 for (i = 0; i < mdata->nctl; i++) {
2034 ctl = mdata->ctl_off + i;
2035 mixer = ctl->mixer_left;
2036 if (mdss_mdp_ctl_is_power_on(ctl) &&
2037 /* RealTime clients */
2038 ((!nrt_client && ctl->mixer_left &&
2039 !ctl->mixer_left->rotator_mode) ||
2040 /* Non-RealTime clients */
2041 (nrt_client && mdss_mdp_is_nrt_ctl_path(ctl)))) {
2042 /* Skip rotation layers as bw calc by rot driver */
2043 if (ctl->mixer_left && ctl->mixer_left->rotator_mode)
2044 continue;
2045 /*
2046 * If traffic shaper is enabled we must check
2047 * if additional bandwidth is required.
2048 */
2049 if (ctl->traffic_shaper_enabled)
2050 mdss_mdp_ctl_perf_update_traffic_shaper_bw
2051 (ctl, mdp_clk);
2052
2053 mdss_mdp_get_bw_vote_mode(ctl, mdata->mdp_rev,
2054 &perf_temp, PERF_CALC_VOTE_MODE_CTL, 0);
2055
2056 bitmap_or(perf_temp.bw_vote_mode,
2057 perf_temp.bw_vote_mode,
2058 ctl->cur_perf.bw_vote_mode,
2059 MDSS_MDP_BW_MODE_MAX);
2060
2061 if (nrt_client && ctl->mixer_left &&
2062 !ctl->mixer_left->rotator_mode) {
2063 bw_sum_of_intfs += ctl->cur_perf.bw_writeback;
2064 continue;
2065 }
2066
2067 perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
2068 ctl->cur_perf.max_per_pipe_ib);
2069
2070 bw_sum_of_intfs += ctl->cur_perf.bw_ctl;
2071
2072 pr_debug("ctl_num=%d bw=%llu mode=0x%lx\n", ctl->num,
2073 ctl->cur_perf.bw_ctl,
2074 *(ctl->cur_perf.bw_vote_mode));
2075 }
2076 }
2077
2078 return bw_sum_of_intfs;
2079}
2080
Ingrid Gallardofb8a0142016-08-29 20:34:59 -07002081/* apply any adjustments to the ib quota */
2082static inline u64 __calc_bus_ib_quota(struct mdss_data_type *mdata,
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302083 struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
2084{
Ingrid Gallardofb8a0142016-08-29 20:34:59 -07002085 u64 bus_ib_quota;
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302086
2087 if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map)) {
2088 if (!nrt_client)
2089 bus_ib_quota = perf->max_per_pipe_ib;
2090 else
2091 bus_ib_quota = 0;
2092 } else {
2093 bus_ib_quota = bw_vote;
2094 }
2095
2096 if (test_bit(MDSS_MDP_BW_MODE_SINGLE_LAYER,
2097 perf->bw_vote_mode) &&
2098 (bus_ib_quota >= PERF_SINGLE_PIPE_BW_FLOOR)) {
2099 struct mult_factor ib_factor_vscaling;
2100
2101 ib_factor_vscaling.numer = 2;
2102 ib_factor_vscaling.denom = 1;
2103 bus_ib_quota = apply_fudge_factor(bus_ib_quota,
2104 &ib_factor_vscaling);
2105 }
2106
2107 if (test_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map) &&
2108 !nrt_client)
2109 bus_ib_quota = apply_fudge_factor(bus_ib_quota,
2110 &mdata->per_pipe_ib_factor);
2111
Ingrid Gallardofb8a0142016-08-29 20:34:59 -07002112 return bus_ib_quota;
2113}
2114
2115static void mdss_mdp_ctl_update_client_vote(struct mdss_data_type *mdata,
2116 struct mdss_mdp_perf_params *perf, bool nrt_client, u64 bw_vote)
2117{
2118 u64 bus_ab_quota, bus_ib_quota;
2119
2120 bus_ab_quota = max(bw_vote, mdata->perf_tune.min_bus_vote);
2121 bus_ib_quota = __calc_bus_ib_quota(mdata, perf, nrt_client, bw_vote);
2122
2123
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302124 bus_ab_quota = apply_fudge_factor(bus_ab_quota, &mdss_res->ab_factor);
2125 ATRACE_INT("bus_quota", bus_ib_quota);
2126
2127 mdss_bus_scale_set_quota(nrt_client ? MDSS_MDP_NRT : MDSS_MDP_RT,
2128 bus_ab_quota, bus_ib_quota);
2129 pr_debug("client:%s ab=%llu ib=%llu\n", nrt_client ? "nrt" : "rt",
2130 bus_ab_quota, bus_ib_quota);
2131}
2132
/*
 * Recompute and apply the bus bandwidth votes for the client class(es)
 * affected by @ctl. A non-real-time ctl path updates the NRT vote; any
 * real-time path (or a ctl with no interface) updates the RT vote.
 * @mdp_clk is forwarded so traffic-shaper scaling can use the new clock.
 */
static void mdss_mdp_ctl_perf_update_bus(struct mdss_data_type *mdata,
	struct mdss_mdp_ctl *ctl, u32 mdp_clk)
{
	u64 bw_sum_of_rt_intfs = 0, bw_sum_of_nrt_intfs = 0;
	struct mdss_mdp_perf_params perf = {0};

	ATRACE_BEGIN(__func__);

	/*
	 * non-real time client
	 * 1. rotator path
	 * 2. writeback output path
	 */
	if (mdss_mdp_is_nrt_ctl_path(ctl)) {
		bitmap_zero(perf.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
		bw_sum_of_nrt_intfs = mdss_mdp_ctl_calc_client_vote(mdata,
			&perf, true, mdp_clk);
		mdss_mdp_ctl_update_client_vote(mdata, &perf, true,
			bw_sum_of_nrt_intfs);
	}

	/*
	 * real time client
	 * 1. any realtime interface - primary or secondary interface
	 * 2. writeback input path
	 */
	if (!mdss_mdp_is_nrt_ctl_path(ctl) ||
		(ctl->intf_num == MDSS_MDP_NO_INTF)) {
		bitmap_zero(perf.bw_vote_mode, MDSS_MDP_BW_MODE_MAX);
		bw_sum_of_rt_intfs = mdss_mdp_ctl_calc_client_vote(mdata,
			&perf, false, mdp_clk);
		mdss_mdp_ctl_update_client_vote(mdata, &perf, false,
			bw_sum_of_rt_intfs);
	}

	ATRACE_END(__func__);
}
2170
/**
 * mdss_mdp_ctl_perf_release_bw() - request zero bandwidth
 * @ctl - pointer to a ctl
 *
 * Function checks a state variable for the ctl, if all pending commit
 * requests are done, meaning no more bandwidth is needed, release
 * bandwidth request.
 */
void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl)
{
	int transaction_status;
	struct mdss_data_type *mdata;
	int i;

	/* only do this for command panel */
	if (!ctl || !ctl->mdata || !ctl->panel_data ||
		(ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
		return;

	mutex_lock(&mdss_mdp_ctl_lock);
	mdata = ctl->mdata;
	/*
	 * If video interface present, cmd panel bandwidth cannot be
	 * released.
	 */
	for (i = 0; i < mdata->nctl; i++) {
		struct mdss_mdp_ctl *ctl_local = mdata->ctl_off + i;

		if (mdss_mdp_ctl_is_power_on(ctl_local) &&
			ctl_local->is_video_mode)
			goto exit;
	}

	transaction_status = mdss_mdp_ctl_perf_get_transaction_status(ctl);
	pr_debug("transaction_status=0x%x\n", transaction_status);

	/*Release the bandwidth only if there are no transactions pending*/
	if (!transaction_status && mdata->enable_bw_release) {
		/*
		 * for splitdisplay if release_bw is called using secondary
		 * then find the main ctl and release BW for main ctl because
		 * BW is always calculated/stored using main ctl.
		 */
		struct mdss_mdp_ctl *ctl_local =
			mdss_mdp_get_main_ctl(ctl) ? : ctl;

		trace_mdp_cmd_release_bw(ctl_local->num);
		ctl_local->cur_perf.bw_ctl = 0;
		ctl_local->new_perf.bw_ctl = 0;
		pr_debug("Release BW ctl=%d\n", ctl_local->num);
		/* bus update keyed off the caller's ctl path type */
		mdss_mdp_ctl_perf_update_bus(mdata, ctl, 0);
	}
exit:
	mutex_unlock(&mdss_mdp_ctl_lock);
}
2226
2227static int mdss_mdp_select_clk_lvl(struct mdss_data_type *mdata,
2228 u32 clk_rate)
2229{
2230 int i;
2231
2232 for (i = 0; i < mdata->nclk_lvl; i++) {
2233 if (clk_rate > mdata->clock_levels[i]) {
2234 continue;
2235 } else {
2236 clk_rate = mdata->clock_levels[i];
2237 break;
2238 }
2239 }
2240
2241 return clk_rate;
2242}
2243
2244static void mdss_mdp_perf_release_ctl_bw(struct mdss_mdp_ctl *ctl,
2245 struct mdss_mdp_perf_params *perf)
2246{
2247 /* Set to zero controller bandwidth. */
2248 memset(perf, 0, sizeof(*perf));
2249 ctl->perf_release_ctl_bw = false;
2250}
2251
2252u32 mdss_mdp_get_mdp_clk_rate(struct mdss_data_type *mdata)
2253{
2254 u32 clk_rate = 0;
2255 uint i;
2256 struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
2257
2258 for (i = 0; i < mdata->nctl; i++) {
2259 struct mdss_mdp_ctl *ctl;
2260
2261 ctl = mdata->ctl_off + i;
2262 if (mdss_mdp_ctl_is_power_on(ctl)) {
2263 clk_rate = max(ctl->cur_perf.mdp_clk_rate,
2264 clk_rate);
2265 clk_rate = clk_round_rate(clk, clk_rate);
2266 }
2267 }
2268 clk_rate = mdss_mdp_select_clk_lvl(mdata, clk_rate);
2269
2270 pr_debug("clk:%u nctl:%d\n", clk_rate, mdata->nctl);
2271 return clk_rate;
2272}
2273
2274static bool is_traffic_shaper_enabled(struct mdss_data_type *mdata)
2275{
2276 uint i;
2277
2278 for (i = 0; i < mdata->nctl; i++) {
2279 struct mdss_mdp_ctl *ctl;
2280
2281 ctl = mdata->ctl_off + i;
2282 if (mdss_mdp_ctl_is_power_on(ctl))
2283 if (ctl->traffic_shaper_enabled)
2284 return true;
2285 }
2286 return false;
2287}
2288
/*
 * Decide whether the bus vote must be re-issued by comparing the new and
 * old perf parameters (ab, adjusted ib, and writeback votes).
 * Returns true when a vote update is warranted.
 */
static bool __mdss_mdp_compare_bw(
	struct mdss_mdp_ctl *ctl,
	struct mdss_mdp_perf_params *new_perf,
	struct mdss_mdp_perf_params *old_perf,
	bool params_changed,
	bool stop_req)
{
	struct mdss_data_type *mdata = ctl->mdata;
	bool is_nrt = mdss_mdp_is_nrt_ctl_path(ctl);
	/* compare post-adjustment ib quotas, not the raw bw votes */
	u64 new_ib =
		__calc_bus_ib_quota(mdata, new_perf, is_nrt, new_perf->bw_ctl);
	u64 old_ib =
		__calc_bus_ib_quota(mdata, old_perf, is_nrt, old_perf->bw_ctl);
	u64 new_ab = new_perf->bw_ctl;
	u64 old_ab = old_perf->bw_ctl;
	bool update_bw = false;

	/*
	 * three cases for bus bandwidth update.
	 * 1. new bandwidth vote (ab or ib) or writeback output vote
	 *    are higher than current vote for update request.
	 * 2. new bandwidth vote or writeback output vote are
	 *    lower than current vote at end of commit or stop.
	 * 3. end of writeback/rotator session - last chance to
	 *    non-realtime remove vote.
	 */
	if ((params_changed &&
			(((new_ib > old_ib) || (new_ab > old_ab)) ||
			(new_perf->bw_writeback > old_perf->bw_writeback))) ||
			(!params_changed &&
			(((new_ib < old_ib) || (new_ab < old_ab)) ||
			(new_perf->bw_writeback < old_perf->bw_writeback))) ||
			(stop_req && is_nrt))
		update_bw = true;

	trace_mdp_compare_bw(new_perf->bw_ctl, new_ib, new_perf->bw_writeback,
		old_perf->bw_ctl, old_ib, old_perf->bw_writeback,
		params_changed, update_bw);

	return update_bw;
}
2330
/*
 * Central perf re-evaluation for one ctl: recalculates its perf params,
 * decides whether the bus vote and/or mdp clock must change, and applies
 * the updates in the safe order (bus vote before clock increase).
 * @params_changed: non-zero when a new commit changed the composition.
 * @stop_req: true on display stop, forcing votes to be dropped.
 */
static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
		int params_changed, bool stop_req)
{
	struct mdss_mdp_perf_params *new, *old;
	int update_bus = 0, update_clk = 0;
	struct mdss_data_type *mdata;
	bool is_bw_released;
	u32 clk_rate = 0;

	if (!ctl || !ctl->mdata)
		return;
	ATRACE_BEGIN(__func__);
	mutex_lock(&mdss_mdp_ctl_lock);

	mdata = ctl->mdata;
	old = &ctl->cur_perf;
	new = &ctl->new_perf;

	/*
	 * We could have released the bandwidth if there were no transactions
	 * pending, so we want to re-calculate the bandwidth in this situation.
	 */
	is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);

	if (mdss_mdp_ctl_is_power_on(ctl)) {
		/* Skip perf update if ctl is used for rotation */
		if (ctl->mixer_left && ctl->mixer_left->rotator_mode)
			goto end;

		if (ctl->perf_release_ctl_bw &&
			mdata->enable_rotator_bw_release)
			mdss_mdp_perf_release_ctl_bw(ctl, new);
		else if (is_bw_released || params_changed)
			mdss_mdp_perf_calc_ctl(ctl, new);

		if (__mdss_mdp_compare_bw(ctl, new, old, params_changed,
				stop_req)) {

			pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n",
				ctl->num, params_changed, new->bw_ctl,
				old->bw_ctl);
			/* on stop, drop the vote; otherwise adopt the new */
			if (stop_req) {
				old->bw_writeback = 0;
				old->bw_ctl = 0;
				old->max_per_pipe_ib = 0;
			} else {
				old->bw_ctl = new->bw_ctl;
				old->max_per_pipe_ib = new->max_per_pipe_ib;
				old->bw_writeback = new->bw_writeback;
			}
			bitmap_copy(old->bw_vote_mode, new->bw_vote_mode,
				MDSS_MDP_BW_MODE_MAX);
			update_bus = 1;
		}

		/*
		 * If traffic shaper is enabled, we do not decrease the clock,
		 * otherwise we would increase traffic shaper latency. Clock
		 * would be decreased after traffic shaper is done.
		 */
		if ((params_changed && (new->mdp_clk_rate > old->mdp_clk_rate))
			|| (!params_changed &&
			(new->mdp_clk_rate < old->mdp_clk_rate) &&
			(false == is_traffic_shaper_enabled(mdata)))) {
			old->mdp_clk_rate = new->mdp_clk_rate;
			update_clk = 1;
		}
	} else {
		/* ctl powered off: zero everything and drop both votes */
		memset(old, 0, sizeof(*old));
		memset(new, 0, sizeof(*new));
		update_bus = 1;
		update_clk = 1;
	}

	/*
	 * Calculate mdp clock before bandwidth calculation. If traffic shaper
	 * is enabled and clock increased, the bandwidth calculation can
	 * use the new clock for the rotator bw calculation.
	 */
	if (update_clk)
		clk_rate = mdss_mdp_get_mdp_clk_rate(mdata);

	if (update_bus)
		mdss_mdp_ctl_perf_update_bus(mdata, ctl, clk_rate);

	/*
	 * Update the clock after bandwidth vote to ensure
	 * bandwidth is available before clock rate is increased.
	 */
	if (update_clk) {
		ATRACE_INT("mdp_clk", clk_rate);
		mdss_mdp_set_clk_rate(clk_rate, false);
		pr_debug("update clk rate = %d HZ\n", clk_rate);
	}

end:
	mutex_unlock(&mdss_mdp_ctl_lock);
	ATRACE_END(__func__);
}
2430
2431struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
2432 u32 off)
2433{
2434 struct mdss_mdp_ctl *ctl = NULL;
2435 u32 cnum;
2436 u32 nctl = mdata->nctl;
2437
2438 mutex_lock(&mdss_mdp_ctl_lock);
2439 if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED)
2440 nctl++;
2441
2442 for (cnum = off; cnum < nctl; cnum++) {
2443 ctl = mdata->ctl_off + cnum;
2444 if (ctl->ref_cnt == 0) {
2445 ctl->ref_cnt++;
2446 ctl->mdata = mdata;
2447 mutex_init(&ctl->lock);
2448 mutex_init(&ctl->offlock);
2449 mutex_init(&ctl->flush_lock);
2450 mutex_init(&ctl->rsrc_lock);
2451 spin_lock_init(&ctl->spin_lock);
2452 BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head);
2453 pr_debug("alloc ctl_num=%d\n", ctl->num);
2454 break;
2455 }
2456 ctl = NULL;
2457 }
2458 mutex_unlock(&mdss_mdp_ctl_lock);
2459
2460 return ctl;
2461}
2462
/*
 * Release one reference on @ctl: free its attached mixers and writeback
 * block, then reset the ctl back to an unconfigured, powered-off state.
 * Note: ctl structures live in the mdata->ctl_off array; no memory is
 * freed here, only the ref count drops.
 */
int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl)
{
	if (!ctl)
		return -ENODEV;

	pr_debug("free ctl_num=%d ref_cnt=%d\n", ctl->num, ctl->ref_cnt);

	if (!ctl->ref_cnt) {
		pr_err("called with ref_cnt=0\n");
		return -EINVAL;
	}

	if (ctl->mixer_left && ctl->mixer_left->ref_cnt)
		mdss_mdp_mixer_free(ctl->mixer_left);

	if (ctl->mixer_right && ctl->mixer_right->ref_cnt)
		mdss_mdp_mixer_free(ctl->mixer_right);

	if (ctl->wb)
		mdss_mdp_wb_free(ctl->wb);

	mutex_lock(&mdss_mdp_ctl_lock);
	ctl->ref_cnt--;
	ctl->intf_num = MDSS_MDP_NO_INTF;
	ctl->intf_type = MDSS_MDP_NO_INTF;
	ctl->is_secure = false;
	ctl->power_state = MDSS_PANEL_POWER_OFF;
	ctl->mixer_left = NULL;
	ctl->mixer_right = NULL;
	ctl->wb = NULL;
	ctl->cdm = NULL;
	memset(&ctl->ops, 0, sizeof(ctl->ops));
	mutex_unlock(&mdss_mdp_ctl_lock);

	return 0;
}
2499
/**
 * mdss_mdp_mixer_alloc() - allocate mdp mixer.
 * @ctl: mdp controller.
 * @type: specifying type of mixer requested. interface or writeback.
 * @mux: specifies if mixer allocation is for split_fb cases.
 * @rotator: specifies if the mixer requested for rotator operations.
 *
 * This function is called to request allocation of mdp mixer
 * during mdp controller path setup.
 *
 * Return: mdp mixer structure that is allocated.
 *	NULL if mixer allocation fails.
 */
struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
		struct mdss_mdp_ctl *ctl, u32 type, int mux, int rotator)
{
	struct mdss_mdp_mixer *mixer = NULL, *alt_mixer = NULL;
	u32 nmixers_intf;
	u32 nmixers_wb;
	u32 i;
	u32 nmixers;
	u32 nmixers_active;
	struct mdss_mdp_mixer *mixer_pool = NULL;

	if (!ctl || !ctl->mdata)
		return NULL;

	mutex_lock(&mdss_mdp_ctl_lock);
	nmixers_intf = ctl->mdata->nmixers_intf;
	nmixers_wb = ctl->mdata->nmixers_wb;

	switch (type) {
	case MDSS_MDP_MIXER_TYPE_INTF:
		mixer_pool = ctl->mdata->mixer_intf;
		nmixers = nmixers_intf;
		nmixers_active = nmixers;

		/* count mixers still free in the interface pool */
		for (i = 0; i < nmixers; i++) {
			mixer = mixer_pool + i;
			if (mixer->ref_cnt)
				nmixers_active--;
		}
		mixer = NULL;

		/*
		 * try to reserve first layer mixer for write back if
		 * assertive display needs to be supported through wfd.
		 * For external displays(pluggable) and writeback avoid
		 * allocating mixers LM0 and LM1 which are allocated
		 * to primary display first.
		 */
		if (ctl->mdata->has_wb_ad && ctl->intf_num &&
			((ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) ||
			!mux)) {
			/* keep LM0 as a fallback (alt_mixer) only */
			alt_mixer = mixer_pool;
			mixer_pool++;
			nmixers--;
		} else if ((ctl->panel_data->panel_info.type == WRITEBACK_PANEL)
			&& (ctl->mdata->ndspp < nmixers)) {
			/* writeback skips the DSPP-backed mixers */
			mixer_pool += ctl->mdata->ndspp;
			nmixers -= ctl->mdata->ndspp;
		} else if ((ctl->panel_data->panel_info.is_pluggable) &&
			nmixers_active > 1) {
			mixer_pool += ctl->mdata->ndspp;
			nmixers -= ctl->mdata->ndspp;
		}
		break;

	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
		mixer_pool = ctl->mdata->mixer_wb;
		nmixers = nmixers_wb;
		/* dedicated wfd wb: rotator uses the extra mixer past pool */
		if ((ctl->mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED) && rotator)
			mixer_pool = mixer_pool + nmixers;
		break;

	default:
		nmixers = 0;
		pr_err("invalid pipe type %d\n", type);
		break;
	}

	/*Allocate virtual wb mixer if no dedicated wfd wb blk is present*/
	if ((ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED) &&
		(type == MDSS_MDP_MIXER_TYPE_WRITEBACK))
		nmixers += 1;

	/* first-fit scan of the (possibly trimmed) pool */
	for (i = 0; i < nmixers; i++) {
		mixer = mixer_pool + i;
		if (mixer->ref_cnt == 0)
			break;
		mixer = NULL;
	}

	/* fall back to the reserved mixer if the pool is exhausted */
	if (!mixer && alt_mixer && (alt_mixer->ref_cnt == 0))
		mixer = alt_mixer;

	if (mixer) {
		mixer->ref_cnt++;
		mixer->params_changed++;
		mixer->ctl = ctl;
		mixer->next_pipe_map = 0;
		mixer->pipe_mapped = 0;
		pr_debug("alloc mixer num %d for ctl=%d\n",
			 mixer->num, ctl->num);
	}
	mutex_unlock(&mdss_mdp_ctl_lock);

	return mixer;
}
2609
2610struct mdss_mdp_mixer *mdss_mdp_mixer_assign(u32 id, bool wb, bool rot)
2611{
2612 struct mdss_mdp_mixer *mixer = NULL;
2613 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2614
2615 mutex_lock(&mdss_mdp_ctl_lock);
2616
2617 if (rot && (mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED))
2618 mixer = mdata->mixer_wb + mdata->nmixers_wb;
2619 else if (wb && id < mdata->nmixers_wb)
2620 mixer = mdata->mixer_wb + id;
2621 else if (!wb && id < mdata->nmixers_intf)
2622 mixer = mdata->mixer_intf + id;
2623
2624 if (mixer && mixer->ref_cnt == 0) {
2625 mixer->ref_cnt++;
2626 mixer->params_changed++;
2627 } else {
2628 pr_err("mixer is in use already = %d\n", id);
2629 mixer = NULL;
2630 }
2631 mutex_unlock(&mdss_mdp_ctl_lock);
2632 return mixer;
2633}
2634
2635int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer)
2636{
2637 if (!mixer)
2638 return -ENODEV;
2639
2640 pr_debug("free mixer_num=%d ref_cnt=%d\n", mixer->num, mixer->ref_cnt);
2641
2642 if (!mixer->ref_cnt) {
2643 pr_err("called with ref_cnt=0\n");
2644 return -EINVAL;
2645 }
2646
2647 mutex_lock(&mdss_mdp_ctl_lock);
2648 mixer->ref_cnt--;
2649 mixer->is_right_mixer = false;
2650 mutex_unlock(&mdss_mdp_ctl_lock);
2651
2652 return 0;
2653}
2654
/*
 * Assemble a full rotator path: a writeback ctl, a writeback mixer in
 * rotator mode, and a rotator writeback block, then start the ctl.
 * Returns the mixer on success, or NULL after unwinding on any failure.
 */
struct mdss_mdp_mixer *mdss_mdp_block_mixer_alloc(void)
{
	struct mdss_mdp_ctl *ctl = NULL;
	struct mdss_mdp_mixer *mixer = NULL;
	struct mdss_mdp_writeback *wb = NULL;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 offset = mdss_mdp_get_wb_ctl_support(mdata, true);
	int ret = 0;

	ctl = mdss_mdp_ctl_alloc(mdss_res, offset);
	if (!ctl) {
		pr_debug("unable to allocate wb ctl\n");
		return NULL;
	}

	mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK,
							false, true);
	if (!mixer) {
		pr_debug("unable to allocate wb mixer\n");
		goto error;
	}

	mixer->rotator_mode = 1;

	/* ctl opmode must match the specific wb layer mixer we got */
	switch (mixer->num) {
	case MDSS_MDP_WB_LAYERMIXER0:
		ctl->opmode = MDSS_MDP_CTL_OP_ROT0_MODE;
		break;
	case MDSS_MDP_WB_LAYERMIXER1:
		ctl->opmode = MDSS_MDP_CTL_OP_ROT1_MODE;
		break;
	default:
		pr_err("invalid layer mixer=%d\n", mixer->num);
		goto error;
	}

	wb = mdss_mdp_wb_alloc(MDSS_MDP_WB_ROTATOR, ctl->num);
	if (!wb) {
		pr_err("Unable to allocate writeback block\n");
		goto error;
	}

	ctl->mixer_left = mixer;

	ctl->ops.start_fnc = mdss_mdp_writeback_start;
	ctl->power_state = MDSS_PANEL_POWER_ON;
	ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_BLOCK;
	mixer->ctl = ctl;
	ctl->wb = wb;

	if (ctl->ops.start_fnc)
		ret = ctl->ops.start_fnc(ctl);

	if (!ret)
		return mixer;
error:
	/* unwind in reverse allocation order */
	if (wb)
		mdss_mdp_wb_free(wb);
	if (mixer)
		mdss_mdp_mixer_free(mixer);
	if (ctl)
		mdss_mdp_ctl_free(ctl);

	return NULL;
}
2720
/*
 * Tear down a rotator path built by mdss_mdp_block_mixer_alloc():
 * stop the ctl, release it, then drop the bus/clock votes.
 */
int mdss_mdp_block_mixer_destroy(struct mdss_mdp_mixer *mixer)
{
	struct mdss_mdp_ctl *ctl;

	if (!mixer || !mixer->ctl) {
		pr_err("invalid ctl handle\n");
		return -ENODEV;
	}

	ctl = mixer->ctl;
	mixer->rotator_mode = 0;

	pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num);

	if (ctl->ops.stop_fnc)
		ctl->ops.stop_fnc(ctl, MDSS_PANEL_POWER_OFF);

	mdss_mdp_ctl_free(ctl);

	/*
	 * NOTE(review): ctl remains a valid pointer after ctl_free (it lives
	 * in the mdata ctl array; free only drops ref_cnt and powers it off),
	 * so the perf update below sees a powered-off ctl and zeroes the
	 * votes — confirm this ordering is intentional.
	 */
	mdss_mdp_ctl_perf_update(ctl, 0, true);

	return 0;
}
2744
/*
 * Compute the absolute time of the next vsync for @ctl based on the
 * current scanline position, pixel clock and panel timings.
 * Returns 0 and fills @wakeup_time on success; -ENOTSUPP when the ctl
 * cannot report its line count, -ENODEV/-EINVAL on bad panel data.
 */
int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
				 ktime_t *wakeup_time)
{
	struct mdss_panel_info *pinfo;
	u64 clk_rate;
	u32 clk_period;
	u32 current_line, total_line;
	u32 time_of_line, time_to_vsync, adjust_line_ns;

	ktime_t current_time = ktime_get();

	if (!ctl->ops.read_line_cnt_fnc)
		return -ENOTSUPP;

	pinfo = &ctl->panel_data->panel_info;
	/* NOTE(review): &...panel_info can never be NULL — dead check */
	if (!pinfo)
		return -ENODEV;

	clk_rate = mdss_mdp_get_pclk_rate(ctl);

	clk_rate = DIV_ROUND_UP_ULL(clk_rate, 1000); /* in kHz */
	if (!clk_rate)
		return -EINVAL;

	/*
	 * calculate clk_period as pico second to maintain good
	 * accuracy with high pclk rate and this number is in 17 bit
	 * range.
	 */
	clk_period = DIV_ROUND_UP_ULL(1000000000, clk_rate);
	if (!clk_period)
		return -EINVAL;

	time_of_line = mdss_panel_get_htotal(pinfo, true) * clk_period;

	time_of_line /= 1000;	/* in nano second */
	if (!time_of_line)
		return -EINVAL;

	current_line = ctl->ops.read_line_cnt_fnc(ctl);

	total_line = mdss_panel_get_vtotal(pinfo);

	/* wrap to a full frame when the counter is past the frame end */
	if (current_line >= total_line)
		time_to_vsync = time_of_line * total_line;
	else
		time_to_vsync = time_of_line * (total_line - current_line);

	if (pinfo->adjust_timer_delay_ms) {
		adjust_line_ns = pinfo->adjust_timer_delay_ms
			* 1000000; /* convert to ns */

		/* Ignore large values of adjust_line_ns */
		if (time_to_vsync > adjust_line_ns)
			time_to_vsync -= adjust_line_ns;
	}

	if (!time_to_vsync)
		return -EINVAL;

	*wakeup_time = ktime_add_ns(current_time, time_to_vsync);

	pr_debug("clk_rate=%lldkHz clk_period=%d cur_line=%d tot_line=%d\n",
		clk_rate, clk_period, current_line, total_line);
	pr_debug("time_to_vsync=%d current_time=%d wakeup_time=%d\n",
		time_to_vsync, (int)ktime_to_ms(current_time),
		(int)ktime_to_ms(*wakeup_time));

	return 0;
}
2815
/*
 * Deferred work: compute the next vsync wakeup time for this ctl and arm
 * the cpu pm event timer so the cpu resumes in time for the next frame.
 */
static void __cpu_pm_work_handler(struct work_struct *work)
{
	struct mdss_mdp_ctl *ctl =
		container_of(work, typeof(*ctl), cpu_pm_work);
	ktime_t wakeup_time;
	struct mdss_overlay_private *mdp5_data;

	/* defensive: container_of of a scheduled work item is never NULL */
	if (!ctl)
		return;

	if (mdss_mdp_display_wakeup_time(ctl, &wakeup_time))
		return;

	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
	activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
}
2832
2833void mdss_mdp_ctl_event_timer(void *data)
2834{
2835 struct mdss_overlay_private *mdp5_data =
2836 (struct mdss_overlay_private *)data;
2837 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
2838
2839 if (mdp5_data->cpu_pm_hdl && ctl && ctl->autorefresh_frame_cnt)
2840 schedule_work(&ctl->cpu_pm_work);
2841}
2842
2843int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt)
2844{
2845 int ret = 0;
2846 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
2847
2848 if (ctl->panel_data->panel_info.type == MIPI_CMD_PANEL) {
2849 ret = mdss_mdp_cmd_set_autorefresh_mode(ctl, frame_cnt);
2850 if (!ret) {
2851 ctl->autorefresh_frame_cnt = frame_cnt;
2852 if (frame_cnt)
2853 mdss_mdp_ctl_event_timer(mdp5_data);
2854 }
2855 } else {
2856 pr_err("Mode not supported for this panel\n");
2857 ret = -EINVAL;
2858 }
2859
2860 return ret;
2861}
2862
2863int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl)
2864{
2865 if (ctl->panel_data->panel_info.type == MIPI_CMD_PANEL)
2866 return mdss_mdp_cmd_get_autorefresh_mode(ctl);
2867 else
2868 return 0;
2869}
2870
2871int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
2872{
2873 switch (ctl->panel_data->panel_info.type) {
2874 case MIPI_VIDEO_PANEL:
2875 case EDP_PANEL:
2876 case DTV_PANEL:
2877 return mdss_mdp_video_reconfigure_splash_done(ctl, handoff);
2878 case MIPI_CMD_PANEL:
2879 return mdss_mdp_cmd_reconfigure_splash_done(ctl, handoff);
2880 default:
2881 return 0;
2882 }
2883}
2884
2885static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
2886 struct mdss_mdp_ctl *split_ctl)
2887{
2888 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2889 struct mdss_panel_info *pinfo;
2890
2891
2892 if (!ctl || !split_ctl || !mdata)
2893 return -ENODEV;
2894
2895 /* setup split ctl mixer as right mixer of original ctl so that
2896 * original ctl can work the same way as dual pipe solution
2897 */
2898 ctl->mixer_right = split_ctl->mixer_left;
2899 pinfo = &ctl->panel_data->panel_info;
2900
2901 /* add x offset from left ctl's border */
2902 split_ctl->border_x_off += (pinfo->lcdc.border_left +
2903 pinfo->lcdc.border_right);
2904
2905 return 0;
2906}
2907
/* Turn on DSC at the mixer's pingpong block and record the state. */
static inline void __dsc_enable(struct mdss_mdp_mixer *mixer)
{
	mdss_mdp_pingpong_write(mixer->pingpong_base,
			MDSS_MDP_REG_PP_DSC_MODE, 1);
	mixer->dsc_enabled = true;
}
2914
2915static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
2916{
2917 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2918 char __iomem *offset = mdata->mdp_base;
2919
2920 mdss_mdp_pingpong_write(mixer->pingpong_base,
2921 MDSS_MDP_REG_PP_DSC_MODE, 0);
2922
2923 if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
2924 offset += MDSS_MDP_DSC_0_OFFSET;
2925 } else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
2926 offset += MDSS_MDP_DSC_1_OFFSET;
2927 } else {
2928 pr_err("invalid mixer numer=%d\n", mixer->num);
2929 return;
2930 }
2931 writel_relaxed(0, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
Naseer Ahmed1970f5d2016-10-20 15:53:54 -04002932 mixer->dsc_enabled = false;
2933 mixer->dsc_merge_enabled = false;
2934}
2935
2936static bool __is_dsc_merge_enabled(u32 common_mode)
2937{
2938 return common_mode & BIT(1);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302939}
2940
/*
 * Program one DSC encoder block with the parameters in @dsc.
 * @mode: DSC common mode word (bit 1 = merge, bit 2 = video mode).
 * @ich_reset_override: force the ICH reset override field in the
 * encoder configuration register.
 */
static void __dsc_config(struct mdss_mdp_mixer *mixer,
	struct dsc_desc *dsc, u32 mode, bool ich_reset_override)
{
	u32 data;
	int bpp, lsb;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	char __iomem *offset = mdata->mdp_base;
	u32 initial_lines = dsc->initial_lines;
	bool is_cmd_mode = !(mode & BIT(2));

	mixer->dsc_merge_enabled = __is_dsc_merge_enabled(mode);
	data = mdss_mdp_pingpong_read(mixer->pingpong_base,
			MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP);
	data |= BIT(18); /* endian flip */
	mdss_mdp_pingpong_write(mixer->pingpong_base,
		MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP, data);

	/* select the DSC encoder instance paired with this mixer */
	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
		offset += MDSS_MDP_DSC_0_OFFSET;
	} else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
		offset += MDSS_MDP_DSC_1_OFFSET;
	} else {
		pr_err("invalid mixer numer=%d\n", mixer->num);
		return;
	}

	writel_relaxed(mode, offset + MDSS_MDP_REG_DSC_COMMON_MODE);

	data = 0;
	if (ich_reset_override)
		data = 3 << 28;

	/* command mode needs one extra initial line */
	if (is_cmd_mode)
		initial_lines += 1;

	data |= (initial_lines << 20);
	data |= ((dsc->slice_last_group_size - 1) << 18);
	/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
	lsb = dsc->bpp % 4;
	bpp = dsc->bpp / 4;
	bpp *= 4;	/* either 8 or 12 */
	bpp <<= 4;
	bpp |= lsb;
	data |= (bpp << 8);
	data |= (dsc->block_pred_enable << 7);
	data |= (dsc->line_buf_depth << 3);
	data |= (dsc->enable_422 << 2);
	data |= (dsc->convert_rgb << 1);
	data |= dsc->input_10_bits;

	pr_debug("%d %d %d %d %d %d %d %d %d, data=%x\n",
		ich_reset_override,
		initial_lines, dsc->slice_last_group_size,
		dsc->bpp, dsc->block_pred_enable, dsc->line_buf_depth,
		dsc->enable_422, dsc->convert_rgb, dsc->input_10_bits, data);

	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_ENC);

	data = dsc->pic_width << 16;
	data |= dsc->pic_height;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_PICTURE);

	data = dsc->slice_width << 16;
	data |= dsc->slice_height;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SLICE);

	data = dsc->chunk_size << 16;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_CHUNK_SIZE);

	pr_debug("mix%d pic_w=%d pic_h=%d, slice_w=%d slice_h=%d, chunk=%d\n",
		mixer->num, dsc->pic_width, dsc->pic_height,
		dsc->slice_width, dsc->slice_height, dsc->chunk_size);
	MDSS_XLOG(mixer->num, dsc->pic_width, dsc->pic_height,
		dsc->slice_width, dsc->slice_height, dsc->chunk_size);

	data = dsc->initial_dec_delay << 16;
	data |= dsc->initial_xmit_delay;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_DELAY);

	data = dsc->initial_scale_value;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_INITIAL);

	data = dsc->scale_decrement_interval;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_DEC_INTERVAL);

	data = dsc->scale_increment_interval;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_SCALE_INC_INTERVAL);

	data = dsc->first_line_bpg_offset;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_FIRST_LINE_BPG_OFFSET);

	data = dsc->nfl_bpg_offset << 16;
	data |= dsc->slice_bpg_offset;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_BPG_OFFSET);

	data = dsc->initial_offset << 16;
	data |= dsc->final_offset;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_DSC_OFFSET);

	data = dsc->det_thresh_flatness << 10;
	data |= dsc->max_qp_flatness << 5;
	data |= dsc->min_qp_flatness;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_FLATNESS);
	/*
	 * NOTE(review): the second write below overwrites the flatness
	 * value just composed with the constant 0x983 — confirm whether
	 * the computed write is still required.
	 */
	writel_relaxed(0x983, offset + MDSS_MDP_REG_DSC_FLATNESS);

	data = dsc->rc_model_size;	/* rate_buffer_size */
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_RC_MODEL_SIZE);

	data = dsc->tgt_offset_lo << 18;
	data |= dsc->tgt_offset_hi << 14;
	data |= dsc->quant_incr_limit1 << 9;
	data |= dsc->quant_incr_limit0 << 4;
	data |= dsc->edge_factor;
	writel_relaxed(data, offset + MDSS_MDP_REG_DSC_RC);
}
3056
3057static void __dsc_config_thresh(struct mdss_mdp_mixer *mixer,
3058 struct dsc_desc *dsc)
3059{
3060 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
3061 char __iomem *offset, *off;
3062 u32 *lp;
3063 char *cp;
3064 int i;
3065
3066 offset = mdata->mdp_base;
3067
3068 if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
3069 offset += MDSS_MDP_DSC_0_OFFSET;
3070 } else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
3071 offset += MDSS_MDP_DSC_1_OFFSET;
3072 } else {
3073 pr_err("invalid mixer numer=%d\n", mixer->num);
3074 return;
3075 }
3076
3077 lp = dsc->buf_thresh;
3078 off = offset + MDSS_MDP_REG_DSC_RC_BUF_THRESH;
3079 for (i = 0; i < 14; i++) {
3080 writel_relaxed(*lp++, off);
3081 off += 4;
3082 }
3083
3084 cp = dsc->range_min_qp;
3085 off = offset + MDSS_MDP_REG_DSC_RANGE_MIN_QP;
3086 for (i = 0; i < 15; i++) {
3087 writel_relaxed(*cp++, off);
3088 off += 4;
3089 }
3090
3091 cp = dsc->range_max_qp;
3092 off = offset + MDSS_MDP_REG_DSC_RANGE_MAX_QP;
3093 for (i = 0; i < 15; i++) {
3094 writel_relaxed(*cp++, off);
3095 off += 4;
3096 }
3097
3098 cp = dsc->range_bpg_offset;
3099 off = offset + MDSS_MDP_REG_DSC_RANGE_BPG_OFFSET;
3100 for (i = 0; i < 15; i++) {
3101 writel_relaxed(*cp++, off);
3102 off += 4;
3103 }
3104}
3105
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303106static bool __dsc_is_3d_mux_enabled(struct mdss_mdp_ctl *ctl,
3107 struct mdss_panel_info *pinfo)
3108{
3109 return ctl && is_dual_lm_single_display(ctl->mfd) &&
3110 pinfo && (pinfo->dsc_enc_total == 1);
3111}
3112
3113/* must be called from master ctl */
3114static u32 __dsc_get_common_mode(struct mdss_mdp_ctl *ctl, bool mux_3d)
3115{
3116 u32 common_mode = 0;
3117
3118 if (ctl->is_video_mode)
3119 common_mode = BIT(2);
3120
3121 if (mdss_mdp_is_both_lm_valid(ctl))
3122 common_mode |= BIT(0);
3123
3124 if (is_dual_lm_single_display(ctl->mfd)) {
3125 if (mux_3d)
3126 common_mode &= ~BIT(0);
3127 else if (mdss_mdp_is_both_lm_valid(ctl)) /* dsc_merge */
3128 common_mode |= BIT(1);
3129 }
3130
3131 return common_mode;
3132}
3133
3134static void __dsc_get_pic_dim(struct mdss_mdp_mixer *mixer_l,
3135 struct mdss_mdp_mixer *mixer_r, u32 *pic_w, u32 *pic_h)
3136{
3137 bool valid_l = mixer_l && mixer_l->valid_roi;
3138 bool valid_r = mixer_r && mixer_r->valid_roi;
3139
3140 *pic_w = 0;
3141 *pic_h = 0;
3142
3143 if (valid_l) {
3144 *pic_w = mixer_l->roi.w;
3145 *pic_h = mixer_l->roi.h;
3146 }
3147
3148 if (valid_r) {
3149 *pic_w += mixer_r->roi.w;
3150 *pic_h = mixer_r->roi.h;
3151 }
3152}
3153
3154static bool __is_ich_reset_override_needed(bool pu_en, struct dsc_desc *dsc)
3155{
3156 /*
3157 * As per the DSC spec, ICH_RESET can be either end of the slice line
3158 * or at the end of the slice. HW internally generates ich_reset at
3159 * end of the slice line if DSC_MERGE is used or encoder has two
3160 * soft slices. However, if encoder has only 1 soft slice and DSC_MERGE
3161 * is not used then it will generate ich_reset at the end of slice.
3162 *
3163 * Now as per the spec, during one PPS session, position where
3164 * ich_reset is generated should not change. Now if full-screen frame
3165 * has more than 1 soft slice then HW will automatically generate
3166 * ich_reset at the end of slice_line. But for the same panel, if
3167 * partial frame is enabled and only 1 encoder is used with 1 slice,
3168 * then HW will generate ich_reset at end of the slice. This is a
3169 * mismatch. Prevent this by overriding HW's decision.
3170 */
3171 return pu_en && dsc && (dsc->full_frame_slices > 1) &&
3172 (dsc->slice_width == dsc->pic_width);
3173}
3174
/*
 * __dsc_setup_dual_lm_single_display() - program DSC for a dual-LM panel
 * @ctl: master ctl driving both layer mixers of the display
 * @pinfo: panel info holding the shared DSC descriptor
 *
 * Handles the MDP_DUAL_LM_SINGLE_DISPLAY topology: one panel fed by two
 * layer mixers, using either DSC merge (both encoders) or the 3D mux
 * (one encoder). Also reprograms the PPB/DCE muxing when partial update
 * changes how many slices the current frame spans (2-slice-PU quirk).
 */
static void __dsc_setup_dual_lm_single_display(struct mdss_mdp_ctl *ctl,
	struct mdss_panel_info *pinfo)
{
	u32 pic_width = 0, pic_height = 0;
	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
	bool valid_l, valid_r;
	bool enable_right_dsc;
	bool mux_3d, ich_reset_override;
	struct dsc_desc *dsc;
	struct mdss_mdp_mixer *mixer_l, *mixer_r;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!pinfo || !ctl || !ctl->is_master ||
		!is_dual_lm_single_display(ctl->mfd))
		return;

	dsc = &pinfo->dsc;
	mixer_l = ctl->mixer_left;
	mixer_r = ctl->mixer_right;

	mux_3d = __dsc_is_3d_mux_enabled(ctl, pinfo);
	common_mode = __dsc_get_common_mode(ctl, mux_3d);
	__dsc_get_pic_dim(mixer_l, mixer_r, &pic_width, &pic_height);

	valid_l = mixer_l->valid_roi;
	valid_r = mixer_r->valid_roi;
	/* with LM swap the left path alone drives the panel */
	if (mdss_mdp_is_lm_swap_needed(mdata, ctl)) {
		valid_l = true;
		valid_r = false;
	}

	this_frame_slices = pic_width / dsc->slice_width;

	/* enable or disable pp_split + DSC_Merge based on partial update */
	if ((pinfo->partial_update_enabled) && !mux_3d &&
		(dsc->full_frame_slices == 4) &&
		(mdss_has_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT))) {

		if (valid_l && valid_r) {
			/* left + right */
			pr_debug("full line (4 slices) or middle 2 slice partial update\n");
			writel_relaxed(0x0,
				mdata->mdp_base + mdata->ppb_ctl[0]);
			writel_relaxed(0x0,
				mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
		} else if (valid_l || valid_r) {
			/* left-only or right-only */
			if (this_frame_slices == 2) {
				pr_debug("2 slice parital update, use merge\n");

				/* tandem + merge */
				common_mode = BIT(1) | BIT(0);

				/* merge needs both encoders even for a one-side ROI */
				valid_r = true;
				valid_l = true;

				writel_relaxed(0x2 << 4, mdata->mdp_base +
					mdata->ppb_ctl[0]);
				writel_relaxed(BIT(0),
					mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
			} else {
				pr_debug("only one slice partial update\n");
				writel_relaxed(0x0, mdata->mdp_base +
					mdata->ppb_ctl[0]);
				writel_relaxed(0x0, mdata->mdp_base +
					MDSS_MDP_REG_DCE_SEL);
			}
		}
	} else {
		writel_relaxed(0x0, mdata->mdp_base + MDSS_MDP_REG_DCE_SEL);
	}

	mdss_panel_dsc_update_pic_dim(dsc, pic_width, pic_height);

	/* interface input width covers every slice of this frame */
	intf_ip_w = this_frame_slices * dsc->slice_width;
	mdss_panel_dsc_pclk_param_calc(dsc, intf_ip_w);

	enc_ip_w = intf_ip_w;
	/* if dsc_merge, both encoders work on same number of slices */
	if (__is_dsc_merge_enabled(common_mode))
		enc_ip_w /= 2;
	mdss_panel_dsc_initial_line_calc(dsc, enc_ip_w);

	/*
	 * __is_ich_reset_override_needed should be called only after
	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
	 */
	ich_reset_override = __is_ich_reset_override_needed(
					pinfo->partial_update_enabled, dsc);
	if (valid_l) {
		__dsc_config(mixer_l, dsc, common_mode, ich_reset_override);
		__dsc_config_thresh(mixer_l, dsc);
		__dsc_enable(mixer_l);
	} else {
		__dsc_disable(mixer_l);
	}

	/* behind the 3D mux only the left encoder runs; keep right off */
	enable_right_dsc = valid_r;
	if (mux_3d && valid_l)
		enable_right_dsc = false;

	if (enable_right_dsc) {
		__dsc_config(mixer_r, dsc, common_mode, ich_reset_override);
		__dsc_config_thresh(mixer_r, dsc);
		__dsc_enable(mixer_r);
	} else {
		__dsc_disable(mixer_r);
	}

	pr_debug("mix%d: valid_l=%d mix%d: valid_r=%d mode=%d, pic_dim:%dx%d mux_3d=%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
		mixer_l->num, valid_l, mixer_r->num, valid_r,
		common_mode, pic_width, pic_height,
		mux_3d, intf_ip_w, enc_ip_w, ich_reset_override);

	MDSS_XLOG(mixer_l->num, valid_l, mixer_r->num, valid_r,
		common_mode, pic_width, pic_height,
		mux_3d, intf_ip_w, enc_ip_w, ich_reset_override);
}
3293
/*
 * __dsc_setup_dual_lm_dual_display() - program DSC for dual-LM dual display
 * @ctl: master ctl (left interface)
 * @pinfo: panel info of the master interface
 * @sctl: slave ctl (right interface)
 * @spinfo: panel info of the slave interface
 *
 * Handles MDP_DUAL_LM_DUAL_DISPLAY: each interface has its own DSC
 * encoder and descriptor, there is no DSC merge, and both encoders are
 * programmed with the same picture dimensions and per-interface load.
 */
static void __dsc_setup_dual_lm_dual_display(
	struct mdss_mdp_ctl *ctl, struct mdss_panel_info *pinfo,
	struct mdss_mdp_ctl *sctl, struct mdss_panel_info *spinfo)
{
	u32 pic_width = 0, pic_height = 0;
	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
	bool valid_l, valid_r;
	bool ich_reset_override;
	struct dsc_desc *dsc_l, *dsc_r;
	struct mdss_mdp_mixer *mixer_l, *mixer_r;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!pinfo || !ctl || !sctl || !spinfo ||
		!ctl->is_master || !ctl->mfd ||
		(ctl->mfd->split_mode != MDP_DUAL_LM_DUAL_DISPLAY))
		return;

	dsc_l = &pinfo->dsc;
	dsc_r = &spinfo->dsc;

	mixer_l = ctl->mixer_left;
	mixer_r = ctl->mixer_right;

	common_mode = __dsc_get_common_mode(ctl, false);
	/*
	 * In this topology, both DSC use same pic dimension. So no need to
	 * maintain two separate local copies.
	 */
	__dsc_get_pic_dim(mixer_l, mixer_r, &pic_width, &pic_height);

	valid_l = mixer_l->valid_roi;
	valid_r = mixer_r->valid_roi;
	/* with LM swap the left path alone drives the output */
	if (mdss_mdp_is_lm_swap_needed(mdata, ctl)) {
		valid_l = true;
		valid_r = false;
	}

	/*
	 * Since both DSC use same pic dimension, set same pic dimension
	 * to both DSC structures.
	 */
	mdss_panel_dsc_update_pic_dim(dsc_l, pic_width, pic_height);
	mdss_panel_dsc_update_pic_dim(dsc_r, pic_width, pic_height);

	this_frame_slices = pic_width / dsc_l->slice_width;
	intf_ip_w = this_frame_slices * dsc_l->slice_width;
	/* each active interface carries half of the total width */
	if (valid_l && valid_r)
		intf_ip_w /= 2;
	/*
	 * In this topology when both interfaces are active, they have same
	 * load so intf_ip_w will be same.
	 */
	mdss_panel_dsc_pclk_param_calc(dsc_l, intf_ip_w);
	mdss_panel_dsc_pclk_param_calc(dsc_r, intf_ip_w);

	/*
	 * In this topology, since there is no dsc_merge, uncompressed input
	 * to encoder and interface is same.
	 */
	enc_ip_w = intf_ip_w;
	mdss_panel_dsc_initial_line_calc(dsc_l, enc_ip_w);
	mdss_panel_dsc_initial_line_calc(dsc_r, enc_ip_w);

	/*
	 * __is_ich_reset_override_needed should be called only after
	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
	 */
	ich_reset_override = __is_ich_reset_override_needed(
					pinfo->partial_update_enabled, dsc_l);

	if (valid_l) {
		__dsc_config(mixer_l, dsc_l, common_mode, ich_reset_override);
		__dsc_config_thresh(mixer_l, dsc_l);
		__dsc_enable(mixer_l);
	} else {
		__dsc_disable(mixer_l);
	}

	if (valid_r) {
		__dsc_config(mixer_r, dsc_r, common_mode, ich_reset_override);
		__dsc_config_thresh(mixer_r, dsc_r);
		__dsc_enable(mixer_r);
	} else {
		__dsc_disable(mixer_r);
	}

	pr_debug("mix%d: valid_l=%d mix%d: valid_r=%d mode=%d, pic_dim:%dx%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
		mixer_l->num, valid_l, mixer_r->num, valid_r,
		common_mode, pic_width, pic_height,
		intf_ip_w, enc_ip_w, ich_reset_override);

	MDSS_XLOG(mixer_l->num, valid_l, mixer_r->num, valid_r,
		common_mode, pic_width, pic_height,
		intf_ip_w, enc_ip_w, ich_reset_override);
}
3389
/*
 * __dsc_setup_single_lm_single_display() - program DSC for a single-LM panel
 * @ctl: master ctl driving the single layer mixer
 * @pinfo: panel info holding the DSC descriptor
 *
 * Simplest DSC topology: one layer mixer, one encoder, no merge and no
 * 3D mux.
 */
static void __dsc_setup_single_lm_single_display(struct mdss_mdp_ctl *ctl,
	struct mdss_panel_info *pinfo)
{
	u32 pic_width = 0, pic_height = 0;
	u32 intf_ip_w, enc_ip_w, common_mode, this_frame_slices;
	bool valid;
	bool ich_reset_override;
	struct dsc_desc *dsc;
	struct mdss_mdp_mixer *mixer;

	if (!pinfo || !ctl || !ctl->is_master)
		return;

	dsc = &pinfo->dsc;
	mixer = ctl->mixer_left;
	valid = mixer->valid_roi;

	common_mode = __dsc_get_common_mode(ctl, false);
	__dsc_get_pic_dim(mixer, NULL, &pic_width, &pic_height);

	mdss_panel_dsc_update_pic_dim(dsc, pic_width, pic_height);

	this_frame_slices = pic_width / dsc->slice_width;
	intf_ip_w = this_frame_slices * dsc->slice_width;
	mdss_panel_dsc_pclk_param_calc(dsc, intf_ip_w);

	/* no dsc_merge: encoder input equals interface input */
	enc_ip_w = intf_ip_w;
	mdss_panel_dsc_initial_line_calc(dsc, enc_ip_w);

	/*
	 * __is_ich_reset_override_needed should be called only after
	 * updating pic dimension, mdss_panel_dsc_update_pic_dim.
	 */
	ich_reset_override = __is_ich_reset_override_needed(
					pinfo->partial_update_enabled, dsc);
	if (valid) {
		__dsc_config(mixer, dsc, common_mode, ich_reset_override);
		__dsc_config_thresh(mixer, dsc);
		__dsc_enable(mixer);
	} else {
		__dsc_disable(mixer);
	}

	pr_debug("mix%d: valid=%d mode=%d, pic_dim:%dx%d intf_ip_w=%d enc_ip_w=%d ich_ovrd=%d\n",
		mixer->num, valid, common_mode, pic_width, pic_height,
		intf_ip_w, enc_ip_w, ich_reset_override);

	MDSS_XLOG(mixer->num, valid, common_mode, pic_width, pic_height,
		intf_ip_w, enc_ip_w, ich_reset_override);
}
3440
/*
 * mdss_mdp_ctl_dsc_setup() - program the DSC encoders for a ctl's topology
 * @ctl: ctl to program (only the master programs hardware)
 * @pinfo: panel info carrying the DSC configuration
 *
 * Dispatches to the topology-specific DSC setup routine based on split
 * mode. Also handles the dynamic-resolution-switch case where a DSC
 * timing is replaced by a non-DSC one: the encoders are turned off.
 */
void mdss_mdp_ctl_dsc_setup(struct mdss_mdp_ctl *ctl,
	struct mdss_panel_info *pinfo)
{
	struct mdss_mdp_ctl *sctl;
	struct mdss_panel_info *spinfo;

	/*
	 * Check for dynamic resolution switch from DSC On to DSC Off
	 * and disable DSC
	 */
	if ((ctl->pending_mode_switch == SWITCH_RESOLUTION) &&
		ctl->is_master &&
		(!is_dsc_compression(pinfo))) {
		if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
			__dsc_disable(ctl->mixer_left);
		if (ctl->mixer_right && ctl->mixer_right->dsc_enabled)
			__dsc_disable(ctl->mixer_right);
		return;
	}

	if (!ctl->is_master) {
		pr_debug("skip slave ctl because master will program for both\n");
		return;
	}

	switch (ctl->mfd->split_mode) {
	case MDP_DUAL_LM_SINGLE_DISPLAY:
		__dsc_setup_dual_lm_single_display(ctl, pinfo);
		break;
	case MDP_DUAL_LM_DUAL_DISPLAY:
		sctl = mdss_mdp_get_split_ctl(ctl);
		if (sctl) {
			/* slave ctl carries the second interface's panel info */
			spinfo = &sctl->panel_data->panel_info;
			__dsc_setup_dual_lm_dual_display(ctl, pinfo, sctl,
					spinfo);
		}
		break;
	default:
		/* pp_split is not supported yet */
		__dsc_setup_single_lm_single_display(ctl, pinfo);
		break;
	}
}
3484
3485static int mdss_mdp_ctl_fbc_enable(int enable,
3486 struct mdss_mdp_mixer *mixer, struct mdss_panel_info *pdata)
3487{
3488 struct fbc_panel_info *fbc;
3489 u32 mode = 0, budget_ctl = 0, lossy_mode = 0, width;
3490
3491 if (!pdata) {
3492 pr_err("Invalid pdata\n");
3493 return -EINVAL;
3494 }
3495
3496 fbc = &pdata->fbc;
3497
3498 if (!fbc->enabled) {
3499 pr_debug("FBC not enabled\n");
3500 return -EINVAL;
3501 }
3502
3503 if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0 ||
3504 mixer->num == MDSS_MDP_INTF_LAYERMIXER1) {
3505 pr_debug("Mixer supports FBC.\n");
3506 } else {
3507 pr_debug("Mixer doesn't support FBC.\n");
3508 return -EINVAL;
3509 }
3510
3511 if (enable) {
3512 if (fbc->enc_mode && pdata->bpp) {
3513 /* width is the compressed width */
3514 width = mult_frac(pdata->xres, fbc->target_bpp,
3515 pdata->bpp);
3516 } else {
3517 /* width is the source width */
3518 width = pdata->xres;
3519 }
3520
3521 mode = ((width) << 16) | ((fbc->slice_height) << 11) |
3522 ((fbc->pred_mode) << 10) | ((fbc->enc_mode) << 9) |
3523 ((fbc->comp_mode) << 8) | ((fbc->qerr_enable) << 7) |
3524 ((fbc->cd_bias) << 4) | ((fbc->pat_enable) << 3) |
3525 ((fbc->vlc_enable) << 2) | ((fbc->bflc_enable) << 1) |
3526 enable;
3527
3528 budget_ctl = ((fbc->line_x_budget) << 12) |
3529 ((fbc->block_x_budget) << 8) | fbc->block_budget;
3530
3531 lossy_mode = ((fbc->max_pred_err) << 28) |
3532 ((fbc->lossless_mode_thd) << 16) |
3533 ((fbc->lossy_mode_thd) << 8) |
3534 ((fbc->lossy_rgb_thd) << 4) | fbc->lossy_mode_idx;
3535 }
3536
3537 mdss_mdp_pingpong_write(mixer->pingpong_base,
3538 MDSS_MDP_REG_PP_FBC_MODE, mode);
3539 mdss_mdp_pingpong_write(mixer->pingpong_base,
3540 MDSS_MDP_REG_PP_FBC_BUDGET_CTL, budget_ctl);
3541 mdss_mdp_pingpong_write(mixer->pingpong_base,
3542 MDSS_MDP_REG_PP_FBC_LOSSY_MODE, lossy_mode);
3543
3544 return 0;
3545}
3546
/*
 * mdss_mdp_ctl_setup() - size the ctl and allocate its layer mixers
 * @ctl: ctl to configure; panel_data must already be attached
 *
 * Derives ctl/mixer dimensions from the panel timing, allocates the left
 * (and, when the panel is wider than one mixer, the right) interface
 * layer mixer, and programs the 3D-pack opmode bits for dual-mixer
 * single-interface output.
 *
 * Return: 0 on success or a negative errno.
 */
int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
{
	struct mdss_mdp_ctl *split_ctl;
	u32 width, height;
	int split_fb, rc = 0;
	u32 max_mixer_width;
	struct mdss_panel_info *pinfo;

	if (!ctl || !ctl->panel_data) {
		pr_err("invalid ctl handle\n");
		return -ENODEV;
	}

	pinfo = &ctl->panel_data->panel_info;
	if (pinfo->type == WRITEBACK_PANEL) {
		pr_err("writeback panel, ignore\n");
		return 0;
	}

	split_ctl = mdss_mdp_get_split_ctl(ctl);

	width = get_panel_width(ctl);
	height = get_panel_yres(pinfo);

	max_mixer_width = ctl->mdata->max_mixer_width;

	/* split_fb: dual-LM single display where each half fits a mixer */
	split_fb = ((is_dual_lm_single_display(ctl->mfd)) &&
		(ctl->mfd->split_fb_left <= max_mixer_width) &&
		(ctl->mfd->split_fb_right <= max_mixer_width)) ? 1 : 0;
	pr_debug("max=%d xres=%d left=%d right=%d\n", max_mixer_width,
		width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right);

	if ((split_ctl && (width > max_mixer_width)) ||
			(width > (2 * max_mixer_width))) {
		pr_err("Unsupported panel resolution: %dx%d\n", width, height);
		return -ENOTSUPP;
	}

	ctl->width = width;
	ctl->height = height;
	ctl->roi = (struct mdss_rect) {0, 0, width, height};

	if (!ctl->mixer_left) {
		ctl->mixer_left =
			mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
			 ((width > max_mixer_width) || split_fb), 0);
		if (!ctl->mixer_left) {
			pr_err("unable to allocate layer mixer\n");
			return -ENOMEM;
		} else if (split_fb && ctl->mixer_left->num >= 1 &&
			(ctl->panel_data->panel_info.type == MIPI_CMD_PANEL)) {
			/* cmd split requires the pair with DSPP0/DSPP1 */
			pr_err("use only DSPP0 and DSPP1 with cmd split\n");
			return -EPERM;
		}
	}

	/* compute the left mixer's share of the panel width */
	if (split_fb) {
		width = ctl->mfd->split_fb_left;
		width += (pinfo->lcdc.border_left +
				pinfo->lcdc.border_right);
	} else if (width > max_mixer_width) {
		width /= 2;
	}

	ctl->mixer_left->width = width;
	ctl->mixer_left->height = height;
	ctl->mixer_left->roi = (struct mdss_rect) {0, 0, width, height};
	ctl->mixer_left->valid_roi = true;
	ctl->mixer_left->roi_changed = true;

	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
		/* second interface gets its own ctl/mixer elsewhere */
		pr_debug("dual display detected\n");
	} else {
		if (split_fb)
			width = ctl->mfd->split_fb_right;

		/* a remaining width means a right mixer is needed */
		if (width < ctl->width) {
			if (ctl->mixer_right == NULL) {
				ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
					MDSS_MDP_MIXER_TYPE_INTF, true, 0);
				if (!ctl->mixer_right) {
					pr_err("unable to allocate right mixer\n");
					if (ctl->mixer_left)
						mdss_mdp_mixer_free(
							ctl->mixer_left);
					return -ENOMEM;
				}
			}
			ctl->mixer_right->is_right_mixer = true;
			ctl->mixer_right->width = width;
			ctl->mixer_right->height = height;
			ctl->mixer_right->roi = (struct mdss_rect)
						{0, 0, width, height};
			ctl->mixer_right->valid_roi = true;
			ctl->mixer_right->roi_changed = true;
		} else if (ctl->mixer_right) {
			ctl->mixer_right->valid_roi = false;
			ctl->mixer_right->roi_changed = false;
			mdss_mdp_mixer_free(ctl->mixer_right);
			ctl->mixer_right = NULL;
		}

		/* 3D pack combines both mixers onto one interface */
		if (ctl->mixer_right) {
			if (!is_dsc_compression(pinfo) ||
				(pinfo->dsc_enc_total == 1))
				ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
					MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
		} else {
			ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
				  MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
		}
	}

	rc = mdss_mdp_pp_default_overlay_config(ctl->mfd, ctl->panel_data,
			true);
	/*
	 * Ignore failure of PP config, ctl set-up can succeed.
	 */
	if (rc) {
		pr_err("failed to set the pp config rc %dfb %d\n", rc,
			ctl->mfd->index);
		rc = 0;
	}
	return 0;
}
3672
/**
 * mdss_mdp_ctl_reconfig() - re-configure ctl for new mode
 * @ctl: mdp controller.
 * @pdata: panel data carrying the target mode's panel_info
 *
 * This function is called when we are trying to dynamically change
 * the DSI mode. We need to change various mdp_ctl properties to
 * the new mode of operation. Only MIPI video and command panels are
 * supported targets.
 *
 * Return: 0 on success, -EINVAL for an unsupported target panel type.
 */
int mdss_mdp_ctl_reconfig(struct mdss_mdp_ctl *ctl,
		struct mdss_panel_data *pdata)
{
	void *tmp;
	int ret = 0;

	/*
	 * Switch first to prevent deleting important data in the case
	 * where panel type is not supported in reconfig
	 */
	if ((pdata->panel_info.type != MIPI_VIDEO_PANEL) &&
			(pdata->panel_info.type != MIPI_CMD_PANEL)) {
		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
		return -EINVAL;
	}

	/*
	 * If only changing resolution there is no need for intf reconfig;
	 * this condition is true exactly when the current mode class
	 * (video vs command) already matches the target panel type.
	 */
	if (!ctl->is_video_mode == (pdata->panel_info.type == MIPI_CMD_PANEL))
		goto skip_intf_reconfig;

	/*
	 * Intentionally not clearing stop function, as stop will
	 * be called after panel is instructed mode switch is happening
	 */
	tmp = ctl->ops.stop_fnc;
	memset(&ctl->ops, 0, sizeof(ctl->ops));
	ctl->ops.stop_fnc = tmp;

	switch (pdata->panel_info.type) {
	case MIPI_VIDEO_PANEL:
		ctl->is_video_mode = true;
		ctl->intf_type = MDSS_INTF_DSI;
		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
		ctl->ops.start_fnc = mdss_mdp_video_start;
		break;
	case MIPI_CMD_PANEL:
		ctl->is_video_mode = false;
		ctl->intf_type = MDSS_INTF_DSI;
		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
		ctl->ops.start_fnc = mdss_mdp_cmd_start;
		break;
	}

	ctl->is_secure = false;
	ctl->split_flush_en = false;
	ctl->perf_release_ctl_bw = false;
	ctl->play_cnt = 0;

	/* interface number is encoded into the opmode bits */
	ctl->opmode |= (ctl->intf_num << 4);

skip_intf_reconfig:
	ctl->width = get_panel_xres(&pdata->panel_info);
	ctl->height = get_panel_yres(&pdata->panel_info);

	if (ctl->mfd->split_mode == MDP_DUAL_LM_SINGLE_DISPLAY) {
		/* each mixer drives half the panel in this topology */
		if (ctl->mixer_left) {
			ctl->mixer_left->width = ctl->width / 2;
			ctl->mixer_left->height = ctl->height;
		}
		if (ctl->mixer_right) {
			ctl->mixer_right->width = ctl->width / 2;
			ctl->mixer_right->height = ctl->height;
		}

		/*
		 * If we are transitioning from DSC On + DSC Merge to DSC Off
		 * the 3D mux needs to be enabled
		 */
		if (!is_dsc_compression(&pdata->panel_info) &&
				ctl->mixer_left &&
				ctl->mixer_left->dsc_enabled &&
				ctl->mixer_left->dsc_merge_enabled) {
			ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
				MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
		}

		/*
		 * If we are transitioning from DSC Off to DSC On + DSC Merge
		 * the 3D mux needs to be disabled
		 */
		if (is_dsc_compression(&pdata->panel_info) &&
				ctl->mixer_left &&
				!ctl->mixer_left->dsc_enabled &&
				pdata->panel_info.dsc_enc_total != 1) {
			ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
				MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
		}
	} else {
		/*
		 * Handles MDP_SPLIT_MODE_NONE, MDP_DUAL_LM_DUAL_DISPLAY and
		 * MDP_PINGPONG_SPLIT case.
		 */
		if (ctl->mixer_left) {
			ctl->mixer_left->width = ctl->width;
			ctl->mixer_left->height = ctl->height;
		}
	}
	ctl->roi = (struct mdss_rect) {0, 0, ctl->width, ctl->height};

	ctl->border_x_off = pdata->panel_info.lcdc.border_left;
	ctl->border_y_off = pdata->panel_info.lcdc.border_top;
	return ret;
}
3785
/*
 * mdss_mdp_ctl_init() - allocate and initialize a ctl for a panel
 * @pdata: panel data describing the attached panel
 * @mfd: framebuffer device the ctl belongs to
 *
 * Picks the interface, opmode, start function and destination format
 * matching the panel type. Writeback panels get a ctl with no interface.
 *
 * Return: the new ctl, or an ERR_PTR() on failure (never NULL).
 */
struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
	struct msm_fb_data_type *mfd)
{
	int ret = 0, offset;
	struct mdss_mdp_ctl *ctl;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_panel_info *pinfo;

	/* writeback uses the dedicated WB ctl, others start from CTL0 */
	if (pdata->panel_info.type == WRITEBACK_PANEL)
		offset = mdss_mdp_get_wb_ctl_support(mdata, false);
	else
		offset = MDSS_MDP_CTL0;

	if (is_pingpong_split(mfd) && !mdata->has_pingpong_split) {
		pr_err("Error: pp_split cannot be enabled on fb%d if HW doesn't support it\n",
			mfd->index);
		return ERR_PTR(-EINVAL);
	}

	ctl = mdss_mdp_ctl_alloc(mdata, offset);
	if (!ctl) {
		pr_err("unable to allocate ctl\n");
		return ERR_PTR(-ENOMEM);
	}

	pinfo = &pdata->panel_info;
	ctl->mfd = mfd;
	ctl->panel_data = pdata;
	ctl->is_video_mode = false;
	ctl->perf_release_ctl_bw = false;
	ctl->border_x_off = pinfo->lcdc.border_left;
	ctl->border_y_off = pinfo->lcdc.border_top;
	ctl->disable_prefill = false;

	switch (pdata->panel_info.type) {
	case EDP_PANEL:
		ctl->is_video_mode = true;
		ctl->intf_num = MDSS_MDP_INTF0;
		ctl->intf_type = MDSS_INTF_EDP;
		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
		ctl->ops.start_fnc = mdss_mdp_video_start;
		break;
	case MIPI_VIDEO_PANEL:
		ctl->is_video_mode = true;
		/* mixer_swap flips which DSI interface serves which display */
		if (pdata->panel_info.pdest == DISPLAY_1)
			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF2 :
				MDSS_MDP_INTF1;
		else
			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF1 :
				MDSS_MDP_INTF2;
		ctl->intf_type = MDSS_INTF_DSI;
		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
		ctl->ops.start_fnc = mdss_mdp_video_start;
		break;
	case MIPI_CMD_PANEL:
		if (pdata->panel_info.pdest == DISPLAY_1)
			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF2 :
				MDSS_MDP_INTF1;
		else
			ctl->intf_num = mdp5_data->mixer_swap ? MDSS_MDP_INTF1 :
				MDSS_MDP_INTF2;
		ctl->intf_type = MDSS_INTF_DSI;
		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
		ctl->ops.start_fnc = mdss_mdp_cmd_start;
		INIT_WORK(&ctl->cpu_pm_work, __cpu_pm_work_handler);
		break;
	case DTV_PANEL:
		ctl->is_video_mode = true;
		ctl->intf_num = MDSS_MDP_INTF3;
		ctl->intf_type = MDSS_INTF_HDMI;
		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
		ctl->ops.start_fnc = mdss_mdp_video_start;
		break;
	case WRITEBACK_PANEL:
		ctl->intf_num = MDSS_MDP_NO_INTF;
		ctl->ops.start_fnc = mdss_mdp_writeback_start;
		break;
	default:
		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
		ret = -EINVAL;
		goto ctl_init_fail;
	}

	/* interface number is encoded into the opmode bits */
	ctl->opmode |= (ctl->intf_num << 4);

	if (ctl->intf_num == MDSS_MDP_NO_INTF) {
		ctl->dst_format = pdata->panel_info.out_format;
	} else {
		switch (pdata->panel_info.bpp) {
		case 18:
			if (ctl->intf_type == MDSS_INTF_DSI)
				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666 |
					MDSS_MDP_PANEL_FORMAT_PACK_ALIGN_MSB;
			else
				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666;
			break;
		case 24:
		default:
			ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB888;
			break;
		}
	}

	return ctl;
ctl_init_fail:
	mdss_mdp_ctl_free(ctl);

	return ERR_PTR(ret);
}
3896
3897int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
3898 struct mdss_panel_data *pdata)
3899{
3900 struct mdss_mdp_ctl *sctl;
3901 struct mdss_mdp_mixer *mixer;
3902
3903 if (!ctl || !pdata)
3904 return -ENODEV;
3905
3906 if (pdata->panel_info.xres > ctl->mdata->max_mixer_width) {
3907 pr_err("Unsupported second panel resolution: %dx%d\n",
3908 pdata->panel_info.xres, pdata->panel_info.yres);
3909 return -ENOTSUPP;
3910 }
3911
3912 if (ctl->mixer_right) {
3913 pr_err("right mixer already setup for ctl=%d\n", ctl->num);
3914 return -EPERM;
3915 }
3916
3917 sctl = mdss_mdp_ctl_init(pdata, ctl->mfd);
3918 if (!sctl) {
3919 pr_err("unable to setup split display\n");
3920 return -ENODEV;
3921 }
3922
3923 sctl->width = get_panel_xres(&pdata->panel_info);
3924 sctl->height = get_panel_yres(&pdata->panel_info);
3925
3926 sctl->roi = (struct mdss_rect){0, 0, sctl->width, sctl->height};
3927
3928 if (!ctl->mixer_left) {
3929 ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
3930 MDSS_MDP_MIXER_TYPE_INTF,
3931 false, 0);
3932 if (!ctl->mixer_left) {
3933 pr_err("unable to allocate layer mixer\n");
3934 mdss_mdp_ctl_destroy(sctl);
3935 return -ENOMEM;
3936 }
3937 }
3938
3939 mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false, 0);
3940 if (!mixer) {
3941 pr_err("unable to allocate layer mixer\n");
3942 mdss_mdp_ctl_destroy(sctl);
3943 return -ENOMEM;
3944 }
3945
3946 mixer->is_right_mixer = true;
3947 mixer->width = sctl->width;
3948 mixer->height = sctl->height;
3949 mixer->roi = (struct mdss_rect)
3950 {0, 0, mixer->width, mixer->height};
3951 mixer->valid_roi = true;
3952 mixer->roi_changed = true;
3953 sctl->mixer_left = mixer;
3954
3955 return mdss_mdp_set_split_ctl(ctl, sctl);
3956}
3957
/*
 * mdss_mdp_ctl_split_display_enable() - program the split-display engine
 * @enable: non-zero to enable split display, zero to disable
 * @main_ctl: master ctl (its interface decides the trigger routing)
 * @slave_ctl: slave ctl, may be NULL (used for logging only here)
 *
 * Programs the upper/lower pipe control registers that decide which
 * interface drives the HW trigger for the other, enables the split
 * engine, and on MDP >= rev 103 video mode also enables split flush.
 */
static void mdss_mdp_ctl_split_display_enable(int enable,
	struct mdss_mdp_ctl *main_ctl, struct mdss_mdp_ctl *slave_ctl)
{
	u32 upper = 0, lower = 0;

	pr_debug("split main ctl=%d intf=%d\n",
			main_ctl->num, main_ctl->intf_num);

	if (slave_ctl)
		pr_debug("split slave ctl=%d intf=%d\n",
			slave_ctl->num, slave_ctl->intf_num);

	if (enable) {
		if (main_ctl->opmode & MDSS_MDP_CTL_OP_CMD_MODE) {
			/* interface controlling sw trigger (cmd mode) */
			lower |= BIT(1);
			if (main_ctl->intf_num == MDSS_MDP_INTF2)
				lower |= BIT(4);
			else
				lower |= BIT(8);
			/*
			 * Enable SMART_PANEL_FREE_RUN if ping pong split
			 * is enabled.
			 */
			if (is_pingpong_split(main_ctl->mfd))
				lower |= BIT(2);
			upper = lower;
		} else {
			/* interface controlling sw trigger (video mode) */
			if (main_ctl->intf_num == MDSS_MDP_INTF2) {
				lower |= BIT(4);
				upper |= BIT(8);
			} else {
				lower |= BIT(8);
				upper |= BIT(4);
			}
		}
	}
	writel_relaxed(upper, main_ctl->mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL);
	writel_relaxed(lower, main_ctl->mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL);
	writel_relaxed(enable, main_ctl->mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_EN);

	if ((main_ctl->mdata->mdp_rev >= MDSS_MDP_HW_REV_103)
		&& main_ctl->is_video_mode) {
		struct mdss_overlay_private *mdp5_data;
		bool mixer_swap = false;

		if (main_ctl->mfd) {
			mdp5_data = mfd_to_mdp5_data(main_ctl->mfd);
			mixer_swap = mdp5_data->mixer_swap;
		}

		/* split flush only works with the default mixer mapping */
		main_ctl->split_flush_en = !mixer_swap;
		if (main_ctl->split_flush_en)
			writel_relaxed(enable ? 0x1 : 0x0,
				main_ctl->mdata->mdp_base +
				MMSS_MDP_MDP_SSPP_SPARE_0);
	}
}
4020
4021static void mdss_mdp_ctl_pp_split_display_enable(bool enable,
4022 struct mdss_mdp_ctl *ctl)
4023{
4024 u32 cfg = 0, cntl = 0;
4025
4026 if (!ctl->mdata->nppb_ctl || !ctl->mdata->nppb_cfg) {
4027 pr_err("No PPB to enable PP split\n");
4028 WARN_ON(1);
4029 }
4030
4031 mdss_mdp_ctl_split_display_enable(enable, ctl, NULL);
4032
4033 if (enable) {
4034 cfg = ctl->slave_intf_num << 20; /* Set slave intf */
4035 cfg |= BIT(16); /* Set horizontal split */
4036 cntl = BIT(5); /* enable dst split */
4037 }
4038
4039 writel_relaxed(cfg, ctl->mdata->mdp_base + ctl->mdata->ppb_cfg[0]);
4040 writel_relaxed(cntl, ctl->mdata->mdp_base + ctl->mdata->ppb_ctl[0]);
4041}
4042
4043int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl)
4044{
4045 struct mdss_mdp_ctl *sctl;
4046 int rc;
4047
4048 rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL,
4049 CTL_INTF_EVENT_FLAG_DEFAULT);
4050 WARN(rc, "unable to close panel for intf=%d\n", ctl->intf_num);
4051
4052 (void) mdss_mdp_pp_default_overlay_config(ctl->mfd, ctl->panel_data,
4053 false);
4054
4055 sctl = mdss_mdp_get_split_ctl(ctl);
4056 if (sctl) {
4057 pr_debug("destroying split display ctl=%d\n", sctl->num);
4058 mdss_mdp_ctl_free(sctl);
4059 }
4060
4061 mdss_mdp_ctl_free(ctl);
4062
4063 return 0;
4064}
4065
4066int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg,
4067 u32 flags)
4068{
4069 struct mdss_panel_data *pdata;
4070 int rc = 0;
4071
4072 if (!ctl || !ctl->panel_data)
4073 return -ENODEV;
4074
4075 pdata = ctl->panel_data;
4076
4077 if (flags & CTL_INTF_EVENT_FLAG_SLAVE_INTF) {
4078 pdata = pdata->next;
4079 if (!pdata) {
4080 pr_err("Error: event=%d flags=0x%x, ctl%d slave intf is not present\n",
4081 event, flags, ctl->num);
4082 return -EINVAL;
4083 }
4084 }
4085
4086 pr_debug("sending ctl=%d event=%d flag=0x%x\n", ctl->num, event, flags);
4087
4088 do {
4089 if (pdata->event_handler)
4090 rc = pdata->event_handler(pdata, event, arg);
4091 pdata = pdata->next;
4092 } while (rc == 0 && pdata && pdata->active &&
4093 !(flags & CTL_INTF_EVENT_FLAG_SKIP_BROADCAST));
4094
4095 return rc;
4096}
4097
/*
 * mdss_mdp_ctl_restore_sub() - reprogram one ctl's hw state after power collapse
 * @ctl: mdp controller to restore.
 *
 * Re-selects this ctl's interface type in the shared DISP_INTF_SEL
 * register, resumes post-processing and reapplies the panel compression
 * (DSC or FBC) configuration that was lost while MDP was powered down.
 */
static void mdss_mdp_ctl_restore_sub(struct mdss_mdp_ctl *ctl)
{
	u32 temp;
	int ret = 0;

	/* OR this ctl's interface type back into the shared INTF_SEL field */
	temp = readl_relaxed(ctl->mdata->mdp_base +
				MDSS_MDP_REG_DISP_INTF_SEL);
	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
	writel_relaxed(temp, ctl->mdata->mdp_base +
				MDSS_MDP_REG_DISP_INTF_SEL);

	if (ctl->mfd && ctl->panel_data) {
		/* flag post-processing resume triggered by power collapse exit */
		ctl->mfd->ipc_resume = true;
		mdss_mdp_pp_resume(ctl->mfd);

		if (is_dsc_compression(&ctl->panel_data->panel_info)) {
			/*
			 * Avoid redundant call to dsc_setup when mode switch
			 * is in progress. During the switch, dsc_setup is
			 * handled in mdss_mode_switch() function.
			 */
			if (ctl->pending_mode_switch != SWITCH_RESOLUTION)
				mdss_mdp_ctl_dsc_setup(ctl,
					&ctl->panel_data->panel_info);
		} else if (ctl->panel_data->panel_info.compression_mode ==
				COMPRESSION_FBC) {
			ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
					&ctl->panel_data->panel_info);
			if (ret)
				pr_err("Failed to restore FBC mode\n");
		}
	}
}
4131
4132/*
4133 * mdss_mdp_ctl_restore() - restore mdp ctl path
4134 * @locked - boolean to signal that clock lock is already acquired
4135 *
4136 * This function is called whenever MDP comes out of a power collapse as
4137 * a result of a screen update. It restores the MDP controller's software
4138 * state to the hardware registers.
4139 * Function does not enable the clocks, so caller must make sure
4140 * clocks are enabled before calling.
 * The locked boolean in the parameters signals that synchronization
4142 * with mdp clocks access is not required downstream.
4143 * Only call this function setting this value to true if the clocks access
4144 * synchronization is guaranteed by the caller.
4145 */
4146void mdss_mdp_ctl_restore(bool locked)
4147{
4148 struct mdss_mdp_ctl *ctl = NULL;
4149 struct mdss_mdp_ctl *sctl;
4150 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4151 u32 cnum;
4152
4153 for (cnum = MDSS_MDP_CTL0; cnum < mdata->nctl; cnum++) {
4154 ctl = mdata->ctl_off + cnum;
4155 if (!mdss_mdp_ctl_is_power_on(ctl))
4156 continue;
4157
4158 pr_debug("restoring ctl%d, intf_type=%d\n", cnum,
4159 ctl->intf_type);
4160 ctl->play_cnt = 0;
4161 sctl = mdss_mdp_get_split_ctl(ctl);
4162 mdss_mdp_ctl_restore_sub(ctl);
4163 if (sctl) {
4164 mdss_mdp_ctl_restore_sub(sctl);
4165 mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
4166 } else if (is_pingpong_split(ctl->mfd)) {
4167 mdss_mdp_ctl_pp_split_display_enable(1, ctl);
4168 }
4169
4170 if (ctl->ops.restore_fnc)
4171 ctl->ops.restore_fnc(ctl, locked);
4172 }
4173}
4174
/*
 * mdss_mdp_ctl_start_sub() - program one ctl/mixer pair for display start
 * @ctl: mdp controller to start.
 * @handoff: true when adopting a pipeline already configured by the bootloader.
 *
 * Runs the interface start function (needed on handoff or once continuous
 * splash has ended), clears stale layer staging, selects the interface
 * type in DISP_INTF_SEL and programs the left mixer output size plus the
 * panel compression (DSC/FBC) configuration.
 *
 * Return: 0 on success or the error from the start/FBC functions.
 */
static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
{
	struct mdss_mdp_mixer *mixer;
	u32 outsize, temp;
	int ret = 0;
	int i, nmixers;

	pr_debug("ctl_num=%d\n", ctl->num);

	/*
	 * Need start_fnc in 2 cases:
	 * (1) handoff
	 * (2) continuous splash finished.
	 */
	if (handoff || !ctl->panel_data->panel_info.cont_splash_enabled) {
		if (ctl->ops.start_fnc)
			ret = ctl->ops.start_fnc(ctl);
		else
			pr_warn("no start function for ctl=%d type=%d\n",
					ctl->num,
					ctl->panel_data->panel_info.type);

		if (ret) {
			pr_err("unable to start intf\n");
			return ret;
		}
	}

	if (!ctl->panel_data->panel_info.cont_splash_enabled) {
		/* no splash content to preserve: clear all layer staging */
		nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER +
			MDSS_MDP_WB_MAX_LAYERMIXER;
		for (i = 0; i < nmixers; i++)
			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(i), 0);
	}

	/*
	 * Select this ctl's interface type in the shared INTF_SEL register;
	 * for pingpong split the adjacent (slave) interface is programmed
	 * with the same type.
	 */
	temp = readl_relaxed(ctl->mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
	if (is_pingpong_split(ctl->mfd))
		temp |= (ctl->intf_type << (ctl->intf_num * 8));

	writel_relaxed(temp, ctl->mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);

	mixer = ctl->mixer_left;
	if (mixer) {
		struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;

		mixer->params_changed++;

		/* LM_OUT_SIZE packs height in the upper half-word */
		outsize = (mixer->height << 16) | mixer->width;
		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);

		if (is_dsc_compression(pinfo)) {
			mdss_mdp_ctl_dsc_setup(ctl, pinfo);
		} else if (pinfo->compression_mode == COMPRESSION_FBC) {
			ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
					pinfo);
		}
	}
	return ret;
}
4237
/*
 * mdss_mdp_ctl_start() - power up and start a control path
 * @ctl: mdp controller to start.
 * @handoff: true when adopting a bootloader-configured pipeline.
 *
 * Sets the ctl up if it was fully off, starts the primary path and —
 * depending on the split mode — the split ctl, the right mixer or
 * pingpong split, then resumes histogram interrupts.
 *
 * Return: 0 on success (or when the panel is already on), negative
 * error code otherwise.
 */
int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff)
{
	struct mdss_mdp_ctl *sctl;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int ret = 0;

	pr_debug("ctl_num=%d, power_state=%d\n", ctl->num, ctl->power_state);

	/* nothing to do unless a mode switch forces a restart */
	if (mdss_mdp_ctl_is_power_on_interactive(ctl)
			&& !(ctl->pending_mode_switch)) {
		pr_debug("%d: panel already on!\n", __LINE__);
		return 0;
	}

	if (mdss_mdp_ctl_is_power_off(ctl)) {
		ret = mdss_mdp_ctl_setup(ctl);
		if (ret)
			return ret;
	}

	sctl = mdss_mdp_get_split_ctl(ctl);

	mutex_lock(&ctl->lock);

	/* coming up from full power off: discard stale perf accounting */
	if (mdss_mdp_ctl_is_power_off(ctl))
		memset(&ctl->cur_perf, 0, sizeof(ctl->cur_perf));

	/*
	 * keep power_on false during handoff to avoid unexpected
	 * operations to overlay.
	 */
	if (!handoff || ctl->pending_mode_switch)
		ctl->power_state = MDSS_PANEL_POWER_ON;

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	ret = mdss_mdp_ctl_start_sub(ctl, handoff);
	if (ret == 0) {
		if (sctl && ctl->mfd &&
		    ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
			/*split display available */
			ret = mdss_mdp_ctl_start_sub(sctl, handoff);
			if (!ret)
				mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
		} else if (ctl->mixer_right) {
			/* dual LM, single ctl: program right mixer directly */
			struct mdss_mdp_mixer *mixer = ctl->mixer_right;
			u32 out;

			mixer->params_changed++;
			out = (mixer->height << 16) | mixer->width;
			mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, out);
			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
		} else if (is_pingpong_split(ctl->mfd)) {
			/* slave interface is the one adjacent to the master */
			ctl->slave_intf_num = (ctl->intf_num + 1);
			mdss_mdp_ctl_pp_split_display_enable(true, ctl);
		}
	}

	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	mutex_unlock(&ctl->lock);

	return ret;
}
4303
/*
 * mdss_mdp_ctl_stop() - stop a control path and drop it to @power_state
 * @ctl: mdp controller to stop.
 * @power_state: target MDSS_PANEL_POWER_* state.
 *
 * Suspends histogram interrupts, invokes the interface stop functions
 * for the ctl and its split sibling, and — only when the panel is going
 * fully off — disables split display and clears the ctl/mixer hardware
 * configuration.
 *
 * Return: 0 on success, error from the interface stop function otherwise.
 */
int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl, int power_state)
{
	struct mdss_mdp_ctl *sctl;
	int ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	pr_debug("ctl_num=%d, power_state=%d\n", ctl->num, ctl->power_state);

	/* panel_reconfig forces the stop path even if already off */
	if (!ctl->mfd->panel_reconfig && !mdss_mdp_ctl_is_power_on(ctl)) {
		pr_debug("%s %d already off!\n", __func__, __LINE__);
		return 0;
	}

	sctl = mdss_mdp_get_split_ctl(ctl);

	mutex_lock(&ctl->lock);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);

	if (ctl->ops.stop_fnc) {
		ret = ctl->ops.stop_fnc(ctl, power_state);
		/* FBC must be torn down alongside the interface */
		if (ctl->panel_data->panel_info.compression_mode ==
				COMPRESSION_FBC) {
			mdss_mdp_ctl_fbc_enable(0, ctl->mixer_left,
					&ctl->panel_data->panel_info);
		}
	} else {
		pr_warn("no stop func for ctl=%d\n", ctl->num);
	}

	if (sctl && sctl->ops.stop_fnc) {
		ret = sctl->ops.stop_fnc(sctl, power_state);
		if (sctl->panel_data->panel_info.compression_mode ==
				COMPRESSION_FBC) {
			mdss_mdp_ctl_fbc_enable(0, sctl->mixer_left,
					&sctl->panel_data->panel_info);
		}
	}
	if (ret) {
		pr_warn("error powering off intf ctl=%d\n", ctl->num);
		goto end;
	}

	/* low-power (not full off) keeps the hw configuration intact */
	if (mdss_panel_is_power_on(power_state)) {
		pr_debug("panel is not off, leaving ctl power on\n");
		goto end;
	}

	if (sctl)
		mdss_mdp_ctl_split_display_enable(0, ctl, sctl);

	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0);
	if (sctl) {
		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0);
		mdss_mdp_reset_mixercfg(sctl);
	}

	mdss_mdp_reset_mixercfg(ctl);

	ctl->play_cnt = 0;

end:
	if (!ret) {
		ctl->power_state = power_state;
		/* release bandwidth votes unless a mode switch will reuse them */
		if (!ctl->pending_mode_switch)
			mdss_mdp_ctl_perf_update(ctl, 0, true);
	}
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);

	mutex_unlock(&ctl->lock);

	return ret;
}
4379
4380/*
4381 * mdss_mdp_pipe_reset() - Halts all the pipes during ctl reset.
4382 * @mixer: Mixer from which to reset all pipes.
4383 * This function called during control path reset and will halt
4384 * all the pipes staged on the mixer.
4385 */
4386static void mdss_mdp_pipe_reset(struct mdss_mdp_mixer *mixer, bool is_recovery)
4387{
4388 unsigned long pipe_map;
4389 u32 bit = 0;
4390 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4391 bool sw_rst_avail = mdss_mdp_pipe_is_sw_reset_available(mdata);
4392
4393 if (!mixer)
4394 return;
4395
4396 pipe_map = mixer->pipe_mapped;
4397 pr_debug("pipe_map=0x%lx\n", pipe_map);
4398 for_each_set_bit_from(bit, &pipe_map, MAX_PIPES_PER_LM) {
4399 struct mdss_mdp_pipe *pipe;
4400
4401 /*
4402 * this assumes that within lm there can be either rect0+rect1
4403 * or rect0 only. Thus to find the hardware pipe to halt only
4404 * check for rect 0 is sufficient.
4405 */
4406 pipe = mdss_mdp_pipe_search(mdata, 1 << bit,
4407 MDSS_MDP_PIPE_RECT0);
4408 if (pipe) {
4409 mdss_mdp_pipe_fetch_halt(pipe, is_recovery);
4410 if (sw_rst_avail)
4411 mdss_mdp_pipe_clk_force_off(pipe);
4412 }
4413 }
4414}
4415
4416static u32 mdss_mdp_poll_ctl_reset_status(struct mdss_mdp_ctl *ctl, u32 cnt)
4417{
4418 u32 status;
4419 /*
4420 * it takes around 30us to have mdp finish resetting its ctl path
4421 * poll every 50us so that reset should be completed at 1st poll
4422 */
4423 do {
4424 udelay(50);
4425 status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET);
4426 status &= 0x01;
4427 pr_debug("status=%x, count=%d\n", status, cnt);
4428 cnt--;
4429 } while (cnt > 0 && status);
4430
4431 return status;
4432}
4433
4434/*
4435 * mdss_mdp_check_ctl_reset_status() - checks ctl reset status
4436 * @ctl: mdp controller
4437 *
4438 * This function checks the ctl reset status before every frame update.
4439 * If the reset bit is set, it keeps polling the status till the hw
4440 * reset is complete. And does a panic if hw fails to complet the reset
4441 * with in the max poll interval.
4442 */
4443void mdss_mdp_check_ctl_reset_status(struct mdss_mdp_ctl *ctl)
4444{
4445 u32 status;
4446
4447 if (!ctl)
4448 return;
4449
4450 status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET);
4451 status &= 0x01;
4452 if (!status)
4453 return;
4454
4455 pr_debug("hw ctl reset is set for ctl:%d\n", ctl->num);
Ingrid Gallardo07bef042016-08-18 19:38:13 -07004456 /* poll for at least ~1 frame */
4457 status = mdss_mdp_poll_ctl_reset_status(ctl, 320);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304458 if (status) {
Ingrid Gallardo07bef042016-08-18 19:38:13 -07004459 pr_err("hw recovery is not complete for ctl:%d status:0x%x\n",
4460 ctl->num, status);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304461 MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt", "dbg_bus",
Sandeep Panda812f5002017-02-24 11:36:59 +05304462 "vbif_dbg_bus", "dsi_dbg_bus", "panic");
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304463 }
4464}
4465
4466/*
4467 * mdss_mdp_ctl_reset() - reset mdp ctl path.
4468 * @ctl: mdp controller.
4469 * this function called when underflow happen,
4470 * it will reset mdp ctl path and poll for its completion
4471 *
4472 * Note: called within atomic context.
4473 */
4474int mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl, bool is_recovery)
4475{
4476 u32 status;
4477 struct mdss_mdp_mixer *mixer;
4478
4479 if (!ctl) {
4480 pr_err("ctl not initialized\n");
4481 return -EINVAL;
4482 }
4483
4484 mixer = ctl->mixer_left;
4485 mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_SW_RESET, 1);
4486
4487 status = mdss_mdp_poll_ctl_reset_status(ctl, 20);
4488 if (status)
4489 pr_err("sw ctl:%d reset timedout\n", ctl->num);
4490
4491 if (mixer) {
4492 mdss_mdp_pipe_reset(mixer, is_recovery);
4493
4494 if (is_dual_lm_single_display(ctl->mfd) &&
4495 ctl->mixer_right)
4496 mdss_mdp_pipe_reset(ctl->mixer_right, is_recovery);
4497 }
4498
4499 return (status) ? -EAGAIN : 0;
4500}
4501
4502/*
4503 * mdss_mdp_mixer_update_pipe_map() - keep track of pipe configuration in mixer
4504 * @master_ctl: mdp controller.
4505 *
4506 * This function keeps track of the current mixer configuration in the hardware.
4507 * It's callers responsibility to call with master control.
4508 */
4509void mdss_mdp_mixer_update_pipe_map(struct mdss_mdp_ctl *master_ctl,
4510 int mixer_mux)
4511{
4512 struct mdss_mdp_mixer *mixer = mdss_mdp_mixer_get(master_ctl,
4513 mixer_mux);
4514
4515 if (!mixer)
4516 return;
4517
4518 pr_debug("mixer%d pipe_mapped=0x%x next_pipes=0x%x\n",
4519 mixer->num, mixer->pipe_mapped, mixer->next_pipe_map);
4520
4521 mixer->pipe_mapped = mixer->next_pipe_map;
4522}
4523
4524static void mdss_mdp_set_mixer_roi(struct mdss_mdp_mixer *mixer,
4525 struct mdss_rect *roi)
4526{
4527 mixer->valid_roi = (roi->w && roi->h);
4528 mixer->roi_changed = false;
4529
4530 if (!mdss_rect_cmp(roi, &mixer->roi)) {
4531 mixer->roi = *roi;
4532 mixer->params_changed++;
4533 mixer->roi_changed = true;
4534 }
4535
4536 pr_debug("mixer%d ROI %s: [%d, %d, %d, %d]\n",
4537 mixer->num, mixer->roi_changed ? "changed" : "not changed",
4538 mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h);
4539 MDSS_XLOG(mixer->num, mixer->roi_changed, mixer->valid_roi,
4540 mixer->roi.x, mixer->roi.y, mixer->roi.w, mixer->roi.h);
4541}
4542
/*
 * mdss_mdp_set_roi() - program left/right partial-update ROIs on a ctl
 * @ctl: master mdp controller (only call from master ctl).
 * @l_roi: requested left mixer ROI; may be rewritten to full screen.
 * @r_roi: requested right mixer ROI; may be rewritten to full screen.
 *
 * Falls back to full-frame ROIs when the request is malformed or
 * partial update is disabled, distributes the ROIs to the mixers of
 * this ctl (and the split ctl on dual-display), and derives the CTL
 * ROI. Also forces reprogramming when the partial-update type changed
 * between consecutive frames.
 */
void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
	struct mdss_rect *l_roi, struct mdss_rect *r_roi)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	enum mdss_mdp_pu_type previous_frame_pu_type, current_frame_pu_type;

	/* Reset ROI when we have (1) invalid ROI (2) feature disabled */
	if ((!l_roi->w && l_roi->h) || (l_roi->w && !l_roi->h) ||
	    (!r_roi->w && r_roi->h) || (r_roi->w && !r_roi->h) ||
	    (!l_roi->w && !l_roi->h && !r_roi->w && !r_roi->h) ||
	    !ctl->panel_data->panel_info.partial_update_enabled) {

		if (ctl->mixer_left) {
			*l_roi = (struct mdss_rect) {0, 0,
					ctl->mixer_left->width,
					ctl->mixer_left->height};
		}

		if (ctl->mixer_right) {
			*r_roi = (struct mdss_rect) {0, 0,
					ctl->mixer_right->width,
					ctl->mixer_right->height};
		}
	}

	/* capture the PU type before the new ROIs are applied */
	previous_frame_pu_type = mdss_mdp_get_pu_type(ctl);
	if (ctl->mixer_left) {
		mdss_mdp_set_mixer_roi(ctl->mixer_left, l_roi);
		ctl->roi = ctl->mixer_left->roi;
	}

	if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
		/* right ROI belongs to the split ctl's own (left) mixer */
		struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);

		if (sctl && sctl->mixer_left) {
			mdss_mdp_set_mixer_roi(sctl->mixer_left, r_roi);
			sctl->roi = sctl->mixer_left->roi;
		}
	} else if (is_dual_lm_single_display(ctl->mfd) && ctl->mixer_right) {

		mdss_mdp_set_mixer_roi(ctl->mixer_right, r_roi);

		/* in this case, CTL_ROI is a union of left+right ROIs. */
		ctl->roi.w += ctl->mixer_right->roi.w;

		/* right_only, update roi.x as per CTL ROI guidelines */
		if (ctl->mixer_left && !ctl->mixer_left->valid_roi) {
			ctl->roi = ctl->mixer_right->roi;
			ctl->roi.x = left_lm_w_from_mfd(ctl->mfd) +
				ctl->mixer_right->roi.x;
		}
	}

	current_frame_pu_type = mdss_mdp_get_pu_type(ctl);

	/*
	 * Force HW programming whenever partial update type changes
	 * between two consecutive frames to avoid incorrect HW programming.
	 */
	if (is_split_lm(ctl->mfd) && mdata->has_src_split &&
	    (previous_frame_pu_type != current_frame_pu_type)) {
		if (ctl->mixer_left)
			ctl->mixer_left->roi_changed = true;
		if (ctl->mixer_right)
			ctl->mixer_right->roi_changed = true;
	}
}
4611
4612static void __mdss_mdp_mixer_update_cfg_masks(u32 pnum,
4613 enum mdss_mdp_pipe_rect rect_num,
4614 u32 stage, struct mdss_mdp_mixer_cfg *cfg)
4615{
4616 u32 masks[NUM_MIXERCFG_REGS] = { 0 };
4617 int i;
4618
4619 if (pnum >= MDSS_MDP_MAX_SSPP)
4620 return;
4621
4622 if (rect_num == MDSS_MDP_PIPE_RECT0) {
4623 masks[0] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].base, stage);
4624 masks[1] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext, stage);
4625 masks[2] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext2, stage);
4626 } else { /* RECT1 */
4627 masks[2] = mdss_mdp_hwio_mask(&mdp_pipe_rec1_hwio[pnum].ext2,
4628 stage);
4629 }
4630
4631 for (i = 0; i < NUM_MIXERCFG_REGS; i++)
4632 cfg->config_masks[i] |= masks[i];
4633
4634 pr_debug("pnum=%d stage=%d cfg=0x%08x ext=0x%08x\n",
4635 pnum, stage, masks[0], masks[1]);
4636}
4637
/*
 * __mdss_mdp_mixer_get_offsets() - fetch the layer staging register offsets
 * @mixer_num: hardware mixer index (see __mdss_mdp_mixer_get_hw_num()).
 * @offsets: output array; must hold at least NUM_MIXERCFG_REGS entries.
 * @count: capacity of @offsets, sanity-checked against NUM_MIXERCFG_REGS.
 *
 * Order matches __mdss_mdp_mixer_update_cfg_masks(): base, EXTN, EXTN2.
 */
static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
		u32 *offsets, size_t count)
{
	WARN_ON(count < NUM_MIXERCFG_REGS);

	offsets[0] = MDSS_MDP_REG_CTL_LAYER(mixer_num);
	offsets[1] = MDSS_MDP_REG_CTL_LAYER_EXTN(mixer_num);
	offsets[2] = MDSS_MDP_REG_CTL_LAYER_EXTN2(mixer_num);
}
4647
4648static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
4649{
4650 /*
4651 * mapping to hardware expectation of actual mixer programming to
4652 * happen on following registers:
4653 * INTF: 0, 1, 2, 5
4654 * WB: 3, 4
4655 * With some exceptions on certain revisions
4656 */
4657 if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
4658 u32 wb_offset;
4659
4660 if (test_bit(MDSS_CAPS_MIXER_1_FOR_WB,
4661 mixer->ctl->mdata->mdss_caps_map))
4662 wb_offset = MDSS_MDP_INTF_LAYERMIXER1;
4663 else
4664 wb_offset = MDSS_MDP_INTF_LAYERMIXER3;
4665
4666 return mixer->num + wb_offset;
4667 } else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3) {
4668 return 5;
4669 } else {
4670 return mixer->num;
4671 }
4672}
4673
4674static inline void __mdss_mdp_mixer_write_layer(struct mdss_mdp_ctl *ctl,
4675 u32 mixer_num, u32 *values, size_t count)
4676{
4677 u32 off[NUM_MIXERCFG_REGS];
4678 int i;
4679
Animesh Kishorec410a9a2018-03-20 11:59:18 +05304680 if (WARN_ON(!values || count < NUM_MIXERCFG_REGS))
4681 return;
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304682
4683 __mdss_mdp_mixer_get_offsets(mixer_num, off, ARRAY_SIZE(off));
4684
4685 for (i = 0; i < count; i++)
4686 mdss_mdp_ctl_write(ctl, off[i], values[i]);
4687}
4688
4689static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
4690 struct mdss_mdp_mixer_cfg *cfg)
4691{
4692 u32 vals[NUM_MIXERCFG_REGS] = {0};
4693 int i, mixer_num;
4694
4695 if (!mixer)
4696 return;
4697
4698 mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
4699
4700 if (cfg) {
4701 for (i = 0; i < NUM_MIXERCFG_REGS; i++)
4702 vals[i] = cfg->config_masks[i];
4703
4704 if (cfg->border_enabled)
4705 vals[0] |= MDSS_MDP_LM_BORDER_COLOR;
4706 if (cfg->cursor_enabled)
4707 vals[0] |= MDSS_MDP_LM_CURSOR_OUT;
4708 }
4709
4710 __mdss_mdp_mixer_write_layer(mixer->ctl, mixer_num,
4711 vals, ARRAY_SIZE(vals));
4712
4713 pr_debug("mixer=%d cfg=0%08x cfg_extn=0x%08x cfg_extn2=0x%08x\n",
4714 mixer->num, vals[0], vals[1], vals[2]);
4715 MDSS_XLOG(mixer->num, vals[0], vals[1], vals[2]);
4716}
4717
4718void mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl)
4719{
4720 u32 vals[NUM_MIXERCFG_REGS] = {0};
4721 int i, nmixers;
4722
4723 if (!ctl)
4724 return;
4725
4726 nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER + MDSS_MDP_WB_MAX_LAYERMIXER;
4727
4728 for (i = 0; i < nmixers; i++)
4729 __mdss_mdp_mixer_write_layer(ctl, i, vals, ARRAY_SIZE(vals));
4730}
4731
4732bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
4733 struct mdss_mdp_pipe *pipe)
4734{
4735 u32 offs[NUM_MIXERCFG_REGS];
4736 u32 cfgs[NUM_MIXERCFG_REGS];
4737 struct mdss_mdp_mixer_cfg mixercfg;
4738 int i, mixer_num;
4739
4740 if (!mixer)
4741 return false;
4742
4743 memset(&mixercfg, 0, sizeof(mixercfg));
4744
4745 mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
4746 __mdss_mdp_mixer_get_offsets(mixer_num, offs, NUM_MIXERCFG_REGS);
4747
4748 for (i = 0; i < NUM_MIXERCFG_REGS; i++)
4749 cfgs[i] = mdss_mdp_ctl_read(mixer->ctl, offs[i]);
4750
4751 __mdss_mdp_mixer_update_cfg_masks(pipe->num, pipe->multirect.num, -1,
4752 &mixercfg);
4753 for (i = 0; i < NUM_MIXERCFG_REGS; i++) {
4754 if (cfgs[i] & mixercfg.config_masks[i]) {
4755 MDSS_XLOG(mixer->num, cfgs[0], cfgs[1]);
4756 return true;
4757 }
4758 }
4759
4760 return false;
4761}
4762
/*
 * mdss_mdp_mixer_setup() - program blend stages and staging for one mixer
 * @master_ctl: master mdp controller for this display.
 * @mixer_mux: which mixer (left/right) of the master to program.
 * @lm_swap: true when the left/right layer mixers are swapped, i.e. the
 *	     software state of one side drives the other side's hardware.
 *
 * Walks the pipes staged on the (logical) mixer, programs per-stage
 * blend operation and alpha registers, accumulates the layer staging
 * masks and commits them, along with output size, op mode and border
 * color, to the hardware mixer. Sets the ctl flush bit for the mixer.
 */
static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
	int mixer_mux, bool lm_swap)
{
	int i, mixer_num;
	int stage, screen_state, outsize;
	u32 off, blend_op, blend_stage;
	u32 mixer_op_mode = 0, bg_alpha_enable = 0;
	struct mdss_mdp_mixer_cfg mixercfg;
	u32 fg_alpha = 0, bg_alpha = 0;
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_ctl *ctl, *ctl_hw;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_mixer *mixer_hw = mdss_mdp_mixer_get(master_ctl,
		mixer_mux);
	struct mdss_mdp_mixer *mixer;

	if (!mixer_hw)
		return;

	ctl = mixer_hw->ctl;
	if (!ctl)
		return;

	ctl_hw = ctl;
	mixer_hw->params_changed = 0;

	/* check if mixer setup for rotator is needed */
	if (mixer_hw->rotator_mode) {
		__mdss_mdp_mixer_write_cfg(mixer_hw, NULL);
		return;
	}

	memset(&mixercfg, 0, sizeof(mixercfg));

	/*
	 * With lm_swap, the software mixer state of the opposite side is
	 * programmed into this hardware mixer.
	 */
	if (lm_swap) {
		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
			mixer = mdss_mdp_mixer_get(master_ctl,
				MDSS_MDP_MIXER_MUX_LEFT);
		else
			mixer = mdss_mdp_mixer_get(master_ctl,
				MDSS_MDP_MIXER_MUX_RIGHT);
		ctl_hw = mixer->ctl;
	} else {
		mixer = mixer_hw;
	}

	/*
	 * if lm_swap was used on MDP_DUAL_LM_DUAL_DISPLAY then we need to
	 * reset mixercfg every frame because there might be a stale value
	 * in mixerfcfg register.
	 */
	if ((ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) &&
	    is_dsc_compression(&ctl->panel_data->panel_info) &&
	    ctl->panel_data->panel_info.partial_update_enabled &&
	    mdss_has_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU))
		mdss_mdp_reset_mixercfg(ctl_hw);

	if (!mixer->valid_roi) {
		/*
		 * resetting mixer config is specifically needed when split
		 * mode is MDP_DUAL_LM_SINGLE_DISPLAY but update is only on
		 * one side.
		 */
		__mdss_mdp_mixer_write_cfg(mixer_hw, NULL);

		MDSS_XLOG(mixer->num, mixer_hw->num, XLOG_FUNC_EXIT);
		return;
	}

	trace_mdp_mixer_update(mixer_hw->num);
	pr_debug("setup mixer=%d hw=%d\n", mixer->num, mixer_hw->num);
	screen_state = ctl->force_screen_state;

	/* LM output size: height in the upper half-word, width in the lower */
	outsize = (mixer->roi.h << 16) | mixer->roi.w;
	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_OUT_SIZE, outsize);

	/* forced blank: stage only border color, skip all pipes */
	if (screen_state == MDSS_SCREEN_FORCE_BLANK) {
		mixercfg.border_enabled = true;
		goto update_mixer;
	}

	/* base stage: border color when empty, else the base pipe */
	pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE * MAX_PIPES_PER_STAGE];
	if (pipe == NULL) {
		mixercfg.border_enabled = true;
	} else {
		__mdss_mdp_mixer_update_cfg_masks(pipe->num,
				pipe->multirect.num, MDSS_MDP_STAGE_BASE,
				&mixercfg);

		if (pipe->src_fmt->alpha_enable)
			bg_alpha_enable = 1;
	}

	/* program blend registers for every staged pipe above the base */
	i = MDSS_MDP_STAGE_0 * MAX_PIPES_PER_STAGE;
	for (; i < MAX_PIPES_PER_LM; i++) {
		pipe = mixer->stage_pipe[i];
		if (pipe == NULL)
			continue;

		/* drop pipes whose recorded stage disagrees with the slot */
		stage = i / MAX_PIPES_PER_STAGE;
		if (stage != pipe->mixer_stage) {
			pr_warn("pipe%d rec%d mixer:%d stage mismatch. pipe->mixer_stage=%d, mixer->stage_pipe=%d multirect_mode=%d. skip staging it\n",
				pipe->num, pipe->multirect.num, mixer->num,
				pipe->mixer_stage, stage, pipe->multirect.mode);
			mixer->stage_pipe[i] = NULL;
			continue;
		}

		/*
		 * pipe which is staged on both LMs will be tracked through
		 * left mixer only.
		 */
		if (!pipe->src_split_req || !mixer->is_right_mixer)
			mixer->next_pipe_map |= pipe->ndx;

		blend_stage = stage - MDSS_MDP_STAGE_0;
		off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);

		/*
		 * Account for additional blending stages
		 * from MDP v1.5 onwards
		 */
		if (blend_stage > 3)
			off += MDSS_MDP_REG_LM_BLEND_STAGE4;
		blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
			    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
		fg_alpha = pipe->alpha;
		bg_alpha = 0xFF - pipe->alpha;
		/* keep fg alpha */
		mixer_op_mode |= 1 << (blend_stage + 1);

		switch (pipe->blend_op) {
		case BLEND_OP_OPAQUE:

			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);

			pr_debug("pnum=%d stg=%d op=OPAQUE\n", pipe->num,
					stage);
			break;

		case BLEND_OP_PREMULTIPLIED:
			if (pipe->src_fmt->alpha_enable) {
				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
				if (fg_alpha != 0xff) {
					bg_alpha = fg_alpha;
					blend_op |=
						MDSS_MDP_BLEND_BG_MOD_ALPHA |
						MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
				} else {
					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
				}
			}
			pr_debug("pnum=%d stg=%d op=PREMULTIPLIED\n", pipe->num,
					stage);
			break;

		case BLEND_OP_COVERAGE:
			if (pipe->src_fmt->alpha_enable) {
				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL |
					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
				if (fg_alpha != 0xff) {
					bg_alpha = fg_alpha;
					blend_op |=
					       MDSS_MDP_BLEND_FG_MOD_ALPHA |
					       MDSS_MDP_BLEND_FG_INV_MOD_ALPHA |
					       MDSS_MDP_BLEND_BG_MOD_ALPHA |
					       MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
				} else {
					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
				}
			}
			pr_debug("pnum=%d stg=%d op=COVERAGE\n", pipe->num,
					stage);
			break;

		default:
			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
			pr_debug("pnum=%d stg=%d op=NONE\n", pipe->num,
					stage);
			break;
		}

		/* opaque layer over an alpha-capable base: drop per-stage alpha */
		if (!pipe->src_fmt->alpha_enable && bg_alpha_enable)
			mixer_op_mode = 0;

		__mdss_mdp_mixer_update_cfg_masks(pipe->num,
				pipe->multirect.num, stage, &mixercfg);

		trace_mdp_sspp_change(pipe);

		pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage,
					blend_op, fg_alpha, bg_alpha);
		mdp_mixer_write(mixer_hw,
			off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
		mdp_mixer_write(mixer_hw,
			off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA, fg_alpha);
		mdp_mixer_write(mixer_hw,
			off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA, bg_alpha);
	}

	if (mixer->cursor_enabled)
		mixercfg.cursor_enabled = true;

update_mixer:
	/* flush bits: mixers 0-4 at bits 6-10, mixer 5 at bit 20 */
	mixer_num = __mdss_mdp_mixer_get_hw_num(mixer_hw);
	ctl_hw->flush_bits |= BIT(mixer_num < 5 ? 6 + mixer_num : 20);

	/* Read GC enable/disable status on LM */
	mixer_op_mode |=
		(mdp_mixer_read(mixer_hw, MDSS_MDP_REG_LM_OP_MODE) & BIT(0));

	if (mixer->src_split_req && mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
		mixer_op_mode |= BIT(31);

	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_OP_MODE, mixer_op_mode);

	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_BORDER_COLOR_0,
		(mdata->bcolor0 & 0xFFF) | ((mdata->bcolor1 & 0xFFF) << 16));
	mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_BORDER_COLOR_1,
		mdata->bcolor2 & 0xFFF);

	__mdss_mdp_mixer_write_cfg(mixer_hw, &mixercfg);

	pr_debug("mixer=%d hw=%d op_mode=0x%08x w=%d h=%d bc0=0x%x bc1=0x%x\n",
		mixer->num, mixer_hw->num,
		mixer_op_mode, mixer->roi.w, mixer->roi.h,
		(mdata->bcolor0 & 0xFFF) | ((mdata->bcolor1 & 0xFFF) << 16),
		mdata->bcolor2 & 0xFFF);
	MDSS_XLOG(mixer->num, mixer_hw->num,
		mixer_op_mode, mixer->roi.h, mixer->roi.w);
}
4997
4998int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata,
4999 u32 *mixer_offsets, u32 *dspp_offsets, u32 *pingpong_offsets,
5000 u32 type, u32 len)
5001{
5002 struct mdss_mdp_mixer *head;
5003 u32 i;
5004 int rc = 0;
5005 u32 size = len;
5006
5007 if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
5008 (mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
5009 size++;
5010
5011 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_mixer) *
5012 size, GFP_KERNEL);
5013
5014 if (!head) {
5015 pr_err("unable to setup mixer type=%d :kzalloc fail\n",
5016 type);
5017 return -ENOMEM;
5018 }
5019
5020 for (i = 0; i < len; i++) {
5021 head[i].type = type;
5022 head[i].base = mdata->mdss_io.base + mixer_offsets[i];
5023 head[i].ref_cnt = 0;
5024 head[i].num = i;
5025 if (type == MDSS_MDP_MIXER_TYPE_INTF && dspp_offsets
5026 && pingpong_offsets) {
5027 if (mdata->ndspp > i)
5028 head[i].dspp_base = mdata->mdss_io.base +
5029 dspp_offsets[i];
5030 head[i].pingpong_base = mdata->mdss_io.base +
5031 pingpong_offsets[i];
5032 }
5033 }
5034
5035 /*
5036 * Duplicate the last writeback mixer for concurrent line and block mode
5037 * operations
5038 */
5039 if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
5040 (mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
5041 head[len] = head[len - 1];
5042
5043 switch (type) {
5044
5045 case MDSS_MDP_MIXER_TYPE_INTF:
5046 mdata->mixer_intf = head;
5047 break;
5048
5049 case MDSS_MDP_MIXER_TYPE_WRITEBACK:
5050 mdata->mixer_wb = head;
5051 break;
5052
5053 default:
5054 pr_err("Invalid mixer type=%d\n", type);
5055 rc = -EINVAL;
5056 break;
5057 }
5058
5059 return rc;
5060}
5061
5062int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata,
5063 u32 *ctl_offsets, u32 len)
5064{
5065 struct mdss_mdp_ctl *head;
5066 struct mutex *shared_lock = NULL;
5067 u32 i;
5068 u32 size = len;
5069
5070 if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED) {
5071 size++;
5072 shared_lock = devm_kzalloc(&mdata->pdev->dev,
5073 sizeof(struct mutex),
5074 GFP_KERNEL);
5075 if (!shared_lock) {
5076 pr_err("unable to allocate mem for mutex\n");
5077 return -ENOMEM;
5078 }
5079 mutex_init(shared_lock);
5080 }
5081
5082 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_ctl) *
5083 size, GFP_KERNEL);
5084
5085 if (!head) {
5086 pr_err("unable to setup ctl and wb: kzalloc fail\n");
5087 return -ENOMEM;
5088 }
5089
5090 for (i = 0; i < len; i++) {
5091 head[i].num = i;
5092 head[i].base = (mdata->mdss_io.base) + ctl_offsets[i];
5093 head[i].ref_cnt = 0;
5094 }
5095
5096 if (mdata->wfd_mode == MDSS_MDP_WFD_SHARED) {
5097 head[len - 1].shared_lock = shared_lock;
5098 /*
5099 * Allocate a virtual ctl to be able to perform simultaneous
5100 * line mode and block mode operations on the same
5101 * writeback block
5102 */
5103 head[len] = head[len - 1];
5104 head[len].num = head[len - 1].num;
5105 }
5106 mdata->ctl_off = head;
5107
5108 return 0;
5109}
5110
5111int mdss_mdp_wb_addr_setup(struct mdss_data_type *mdata,
5112 u32 num_block_wb, u32 num_intf_wb)
5113{
5114 struct mdss_mdp_writeback *wb;
5115 u32 total, i;
5116
5117 total = num_block_wb + num_intf_wb;
5118 wb = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_writeback) *
5119 total, GFP_KERNEL);
5120 if (!wb)
5121 return -ENOMEM;
5122
5123 for (i = 0; i < total; i++) {
5124 wb[i].num = i;
5125 if (i < num_block_wb) {
5126 wb[i].caps = MDSS_MDP_WB_ROTATOR | MDSS_MDP_WB_WFD;
5127 if (mdss_mdp_is_ubwc_supported(mdata))
5128 wb[i].caps |= MDSS_MDP_WB_UBWC;
5129 } else {
5130 wb[i].caps = MDSS_MDP_WB_WFD | MDSS_MDP_WB_INTF;
5131 }
5132 }
5133
5134 mdata->wb = wb;
5135 mdata->nwb = total;
5136 mutex_init(&mdata->wb_lock);
5137
5138 return 0;
5139}
5140
5141struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
5142{
5143 struct mdss_mdp_mixer *mixer = NULL;
5144
5145 if (!ctl) {
5146 pr_err("ctl not initialized\n");
5147 return NULL;
5148 }
5149
5150 switch (mux) {
5151 case MDSS_MDP_MIXER_MUX_DEFAULT:
5152 case MDSS_MDP_MIXER_MUX_LEFT:
5153 mixer = ctl->mixer_left;
5154 break;
5155 case MDSS_MDP_MIXER_MUX_RIGHT:
5156 mixer = ctl->mixer_right;
5157 break;
5158 }
5159
5160 return mixer;
5161}
5162
5163struct mdss_mdp_pipe *mdss_mdp_get_staged_pipe(struct mdss_mdp_ctl *ctl,
5164 int mux, int stage, bool is_right_blend)
5165{
5166 struct mdss_mdp_pipe *pipe = NULL;
5167 struct mdss_mdp_mixer *mixer;
5168 int index = (stage * MAX_PIPES_PER_STAGE) + (int)is_right_blend;
5169
5170 if (!ctl)
5171 return NULL;
5172
5173 WARN_ON(index > MAX_PIPES_PER_LM);
5174
5175 mixer = mdss_mdp_mixer_get(ctl, mux);
5176 if (mixer && (index < MAX_PIPES_PER_LM))
5177 pipe = mixer->stage_pipe[index];
5178
5179 pr_debug("%pS index=%d pipe%d\n", __builtin_return_address(0),
5180 index, pipe ? pipe->num : -1);
5181 return pipe;
5182}
5183
5184int mdss_mdp_get_pipe_flush_bits(struct mdss_mdp_pipe *pipe)
5185{
5186 if (WARN_ON(!pipe || pipe->num >= MDSS_MDP_MAX_SSPP))
5187 return 0;
5188
5189 return BIT(mdp_pipe_hwio[pipe->num].flush_bit);
5190}
5191
5192int mdss_mdp_async_ctl_flush(struct msm_fb_data_type *mfd,
5193 u32 flush_bits)
5194{
5195 struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
5196 struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
5197 struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
5198 int ret = 0;
5199
5200 mutex_lock(&ctl->flush_lock);
5201
5202 mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
5203 if ((!ctl->split_flush_en) && sctl)
5204 mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
5205
5206 mutex_unlock(&ctl->flush_lock);
5207 return ret;
5208}
5209
/**
 * mdss_mdp_mixer_pipe_update() - (re)stage a pipe on a mixer
 * @pipe: pipe to stage
 * @mixer: mixer on which the pipe is to be staged
 * @params_changed: non-zero if the staging layout must be recomputed
 *
 * When @params_changed is set, every blend-container slot holding @pipe
 * is cleared first, then the pipe is placed in the single slot derived
 * from its mixer_stage and blend side. The pipe's flush bit is always
 * OR-ed into the ctl's pending flush mask.
 *
 * Return: 0 on success, -EINVAL on NULL arguments or invalid stage.
 */
int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_mixer *mixer, int params_changed)
{
	struct mdss_mdp_ctl *ctl;
	int i, j, k;

	if (!pipe)
		return -EINVAL;
	if (!mixer)
		return -EINVAL;
	ctl = mixer->ctl;
	if (!ctl)
		return -EINVAL;

	if (pipe->mixer_stage >= MDSS_MDP_MAX_STAGE) {
		pr_err("invalid mixer stage\n");
		return -EINVAL;
	}

	pr_debug("pnum=%x mixer=%d stage=%d\n", pipe->num, mixer->num,
			pipe->mixer_stage);

	mutex_lock(&ctl->flush_lock);

	if (params_changed) {
		mixer->params_changed++;
		for (i = MDSS_MDP_STAGE_UNUSED; i < MDSS_MDP_MAX_STAGE; i++) {
			/* j = target slot for this stage (left or right) */
			j = i * MAX_PIPES_PER_STAGE;

			/*
			 * this could lead to cases where left blend index is
			 * not populated. For instance, where pipe is spanning
			 * across layer mixers. But this is handled properly
			 * within mixer programming code.
			 */
			if (pipe->is_right_blend)
				j++;

			/* First clear all blend containers for current stage */
			for (k = 0; k < MAX_PIPES_PER_STAGE; k++) {
				u32 ndx = (i * MAX_PIPES_PER_STAGE) + k;

				if (mixer->stage_pipe[ndx] == pipe)
					mixer->stage_pipe[ndx] = NULL;
			}

			/* then stage actual pipe on specific blend container */
			if (i == pipe->mixer_stage)
				mixer->stage_pipe[j] = pipe;
		}
	}

	ctl->flush_bits |= mdss_mdp_get_pipe_flush_bits(pipe);

	mutex_unlock(&ctl->flush_lock);

	return 0;
}
5268
5269/**
5270 * mdss_mdp_mixer_unstage_all() - Unstage all pipes from mixer
5271 * @mixer: Mixer from which to unstage all pipes
5272 *
5273 * Unstage any pipes that are currently attached to mixer.
5274 *
5275 * NOTE: this will not update the pipe structure, and thus a full
5276 * deinitialization or reconfiguration of all pipes is expected after this call.
5277 */
5278void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer)
5279{
5280 struct mdss_mdp_pipe *tmp;
5281 int i;
5282
5283 if (!mixer)
5284 return;
5285
5286 for (i = 0; i < MAX_PIPES_PER_LM; i++) {
5287 tmp = mixer->stage_pipe[i];
5288 if (tmp) {
5289 mixer->stage_pipe[i] = NULL;
5290 mixer->params_changed++;
5291 tmp->params_changed++;
5292 }
5293 }
5294}
5295
/**
 * mdss_mdp_mixer_pipe_unstage() - remove a pipe from a mixer's staging
 * @pipe: pipe to unstage
 * @mixer: mixer the pipe may be staged on
 *
 * First tries the slot implied by the pipe's mixer_stage and blend side;
 * if the pipe is not there, falls back to a linear scan over all blend
 * containers. A pipe that is not staged anywhere is not treated as an
 * error.
 *
 * Return: 0 on success or when the pipe was not staged, -EINVAL on NULL
 * arguments.
 */
int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_mixer *mixer)
{
	int i, right_blend;

	if (!pipe)
		return -EINVAL;
	if (!mixer)
		return -EINVAL;

	/* expected slot: stage base + right-blend offset */
	right_blend = pipe->is_right_blend ? 1 : 0;
	i = (pipe->mixer_stage * MAX_PIPES_PER_STAGE) + right_blend;
	if ((i < MAX_PIPES_PER_LM) && (pipe == mixer->stage_pipe[i])) {
		pr_debug("unstage p%d from %s side of stage=%d lm=%d ndx=%d\n",
			pipe->num, right_blend ? "right" : "left",
			pipe->mixer_stage, mixer->num, i);
	} else {
		int stage;

		/* slot mismatch: scan every blend container for the pipe */
		for (i = 0; i < MAX_PIPES_PER_LM; i++) {
			if (pipe != mixer->stage_pipe[i])
				continue;

			stage = i / MAX_PIPES_PER_STAGE;
			right_blend = i & 1;

			pr_warn("lm=%d pipe #%d stage=%d with %s blend, unstaged from %s side of stage=%d!\n",
				mixer->num, pipe->num, pipe->mixer_stage,
				pipe->is_right_blend ? "right" : "left",
				right_blend ? "right" : "left", stage);
			break;
		}

		/* pipe not found, not a failure */
		if (i == MAX_PIPES_PER_LM)
			return 0;
	}

	mixer->params_changed++;
	mixer->stage_pipe[i] = NULL;

	return 0;
}
5339
/**
 * mdss_mdp_ctl_update_fps() - apply a pending dynamic-fps change
 * @ctl: control path whose panel fps may need reconfiguration
 *
 * No-op when the panel does not support dynamic fps, no config callback
 * is registered, or no fps update has ever been requested. Otherwise
 * derives the target fps (from the panel timing for porch/clock update
 * modes, or from pinfo->new_fps) and calls the ctl's config_fps_fnc
 * under the dfps lock.
 *
 * Return: 0 on success or nothing-to-do, -ENODEV on missing panel/mfd
 * data, or the error from config_fps_fnc.
 */
int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl)
{
	struct mdss_panel_info *pinfo;
	struct mdss_overlay_private *mdp5_data;
	int ret = 0;
	int new_fps;

	if (!ctl->panel_data || !ctl->mfd)
		return -ENODEV;

	pinfo = &ctl->panel_data->panel_info;

	if (!pinfo->dynamic_fps || !ctl->ops.config_fps_fnc)
		return 0;

	if (!pinfo->default_fps) {
		/* we haven't got any call to update the fps */
		return 0;
	}

	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
	if (!mdp5_data)
		return -ENODEV;

	/*
	 * Panel info is already updated with the new fps info,
	 * so we need to lock the data to make sure the panel info
	 * is not updated while we reconfigure the HW.
	 */
	mutex_lock(&mdp5_data->dfps_lock);

	/* porch/clock based update modes derive fps from the timing */
	if ((pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_VFP) ||
		(pinfo->dfps_update == DFPS_IMMEDIATE_PORCH_UPDATE_MODE_HFP) ||
		(pinfo->dfps_update ==
			DFPS_IMMEDIATE_MULTI_UPDATE_MODE_CLK_HFP) ||
		(pinfo->dfps_update ==
			DFPS_IMMEDIATE_MULTI_MODE_HFP_CALC_CLK) ||
		pinfo->dfps_update == DFPS_IMMEDIATE_CLK_UPDATE_MODE) {
		new_fps = mdss_panel_get_framerate(pinfo,
				FPS_RESOLUTION_DEFAULT);
	} else {
		new_fps = pinfo->new_fps;
	}

	pr_debug("fps new:%d old:%d\n", new_fps,
		pinfo->current_fps);

	if (new_fps == pinfo->current_fps) {
		pr_debug("FPS is already %d\n", new_fps);
		ret = 0;
		goto exit;
	}

	ret = ctl->ops.config_fps_fnc(ctl, new_fps);
	if (!ret)
		pr_debug("fps set to %d\n", new_fps);
	else
		pr_err("Failed to configure %d fps rc=%d\n",
			new_fps, ret);

exit:
	mutex_unlock(&mdp5_data->dfps_lock);
	return ret;
}
5404
/**
 * mdss_mdp_display_wait4comp() - wait for a commit to complete
 * @ctl: control path to wait on
 *
 * Blocks (interruptibly) on the ctl's wait_fnc, then releases the perf
 * vote for this frame. On HW rev 1.3 a workaround clears an AHB clock
 * bit once the previous flush has been consumed.
 *
 * Return: 0 on success (or if the ctl is powered off), -ENODEV for a
 * NULL ctl, -EINTR if interrupted, or the wait_fnc error.
 */
int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl)
{
	int ret;
	u32 reg_data, flush_data;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!ctl) {
		pr_err("invalid ctl\n");
		return -ENODEV;
	}

	ret = mutex_lock_interruptible(&ctl->lock);
	if (ret)
		return ret;

	if (!mdss_mdp_ctl_is_power_on(ctl)) {
		mutex_unlock(&ctl->lock);
		return 0;
	}

	ATRACE_BEGIN("wait_fnc");
	if (ctl->ops.wait_fnc)
		ret = ctl->ops.wait_fnc(ctl, NULL);
	ATRACE_END("wait_fnc");

	trace_mdp_commit(ctl);

	mdss_mdp_ctl_perf_update(ctl, 0, false);
	mdata->bw_limit_pending = false;

	/*
	 * NOTE(review): rev-1.03-specific workaround — BIT(28) in the AHB
	 * clock register is cleared once the previously flushed bits have
	 * been consumed by hardware; exact HW rationale not visible here.
	 */
	if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103)) {
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
		reg_data = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH);
		flush_data = readl_relaxed(mdata->mdp_base + AHB_CLK_OFFSET);
		if ((flush_data & BIT(28)) &&
		    !(ctl->flush_reg_data & reg_data)) {

			flush_data &= ~(BIT(28));
			writel_relaxed(flush_data,
					 mdata->mdp_base + AHB_CLK_OFFSET);
			ctl->flush_reg_data = 0;
		}
		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
	}

	mutex_unlock(&ctl->lock);
	return ret;
}
5453
/**
 * mdss_mdp_display_wait4pingpong() - wait for ping-pong done
 * @ctl: control path to wait on
 * @use_lock: take ctl->lock around the wait when true
 *
 * Waits for ping-pong completion on @ctl and, if present, on its split
 * peer. If either wait times out, both ctls are reset and the DSI write
 * pointer is re-initialized as recovery.
 *
 * Return: 0 on success or powered-off/no-op, interruption error from
 * mutex_lock_interruptible, or the (last) wait_pingpong error.
 */
int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl, bool use_lock)
{
	struct mdss_mdp_ctl *sctl = NULL;
	int ret;
	bool recovery_needed = false;

	if (use_lock) {
		ret = mutex_lock_interruptible(&ctl->lock);
		if (ret)
			return ret;
	}

	if (!mdss_mdp_ctl_is_power_on(ctl) || !ctl->ops.wait_pingpong) {
		if (use_lock)
			mutex_unlock(&ctl->lock);
		return 0;
	}

	ATRACE_BEGIN("wait_pingpong");
	ret = ctl->ops.wait_pingpong(ctl, NULL);
	ATRACE_END("wait_pingpong");
	if (ret)
		recovery_needed = true;

	sctl = mdss_mdp_get_split_ctl(ctl);

	if (sctl && sctl->ops.wait_pingpong) {
		ATRACE_BEGIN("wait_pingpong sctl");
		ret = sctl->ops.wait_pingpong(sctl, NULL);
		ATRACE_END("wait_pingpong sctl");
		if (ret)
			recovery_needed = true;
	}

	ctl->mdata->bw_limit_pending = false;
	/* timeout on either ctl: reset both and resync DSI write pointer */
	if (recovery_needed) {
		mdss_mdp_ctl_reset(ctl, true);
		if (sctl)
			mdss_mdp_ctl_reset(sctl, true);

		mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DSI_RESET_WRITE_PTR,
			NULL, CTL_INTF_EVENT_FLAG_DEFAULT);

		pr_debug("pingpong timeout recovery finished\n");
	}

	if (use_lock)
		mutex_unlock(&ctl->lock);

	return ret;
}
5505
/*
 * Force both layer mixers to output border color for the next kickoff
 * (used when fence waits time out and the layer buffers cannot be
 * trusted), then restore the default screen state so the frame after
 * next is programmed normally.
 */
static void mdss_mdp_force_border_color(struct mdss_mdp_ctl *ctl)
{
	struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	bool lm_swap = mdss_mdp_is_lm_swap_needed(mdata, ctl);

	/* blank state must be set BEFORE mixer setup runs */
	ctl->force_screen_state = MDSS_SCREEN_FORCE_BLANK;

	if (sctl)
		sctl->force_screen_state = MDSS_SCREEN_FORCE_BLANK;

	mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_LEFT, lm_swap);
	mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_RIGHT, lm_swap);

	ctl->force_screen_state = MDSS_SCREEN_DEFAULT;
	if (sctl)
		sctl->force_screen_state = MDSS_SCREEN_DEFAULT;

	/*
	 * Update the params changed for mixer for the next frame to
	 * configure the mixer setup properly.
	 */
	if (ctl->mixer_left)
		ctl->mixer_left->params_changed++;
	if (ctl->mixer_right)
		ctl->mixer_right->params_changed++;
}
5533
/**
 * mdss_mdp_display_commit() - program and kick off one display frame
 * @ctl: control path to commit
 * @arg: opaque argument forwarded to prepare/display callbacks
 * @commit_cb: optional commit-stage callbacks (setup done / ready for
 *	       kickoff)
 *
 * Commit sequence: recompute perf votes, program mixers and CTL_TOP
 * when parameters changed, run post-processing, notify frame-ready
 * (falling back to border color if fences timed out), update partial
 * update ROIs, write the flush registers and finally invoke the
 * interface's display function for the kickoff.
 *
 * Return: 0 on success, -ENODEV for a NULL ctl, or the error from the
 * prepare/display callbacks.
 */
int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
	struct mdss_mdp_commit_cb *commit_cb)
{
	struct mdss_mdp_ctl *sctl = NULL;
	int ret = 0;
	bool is_bw_released, split_lm_valid;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 ctl_flush_bits = 0, sctl_flush_bits = 0;

	if (!ctl) {
		pr_err("display function not set\n");
		return -ENODEV;
	}

	mutex_lock(&ctl->lock);
	pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);

	if (!mdss_mdp_ctl_is_power_on(ctl)) {
		mutex_unlock(&ctl->lock);
		return 0;
	}

	split_lm_valid = mdss_mdp_is_both_lm_valid(ctl);

	sctl = mdss_mdp_get_split_ctl(ctl);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);

	mutex_lock(&ctl->flush_lock);

	/*
	 * We could have released the bandwidth if there were no transactions
	 * pending, so we want to re-calculate the bandwidth in this situation
	 */
	is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
	if (is_bw_released) {
		if (sctl)
			is_bw_released =
				!mdss_mdp_ctl_perf_get_transaction_status(sctl);
	}

	/*
	 * left update on any topology or
	 * any update on MDP_DUAL_LM_SINGLE_DISPLAY topology.
	 */
	if (ctl->mixer_left->valid_roi ||
	    (is_dual_lm_single_display(ctl->mfd) &&
	     ctl->mixer_right->valid_roi))
		mdss_mdp_ctl_perf_set_transaction_status(ctl,
				PERF_SW_COMMIT_STATE, PERF_STATUS_BUSY);

	/* right update on MDP_DUAL_LM_DUAL_DISPLAY */
	if (sctl && sctl->mixer_left->valid_roi)
		mdss_mdp_ctl_perf_set_transaction_status(sctl,
			PERF_SW_COMMIT_STATE, PERF_STATUS_BUSY);

	if (ctl->mixer_right)
		ctl->mixer_right->src_split_req =
			mdata->has_src_split && split_lm_valid;

	/* reprogram mixers only when something actually changed */
	if (is_bw_released || ctl->force_screen_state ||
	    (ctl->mixer_left->params_changed) ||
	    (ctl->mixer_right && ctl->mixer_right->params_changed)) {
		bool lm_swap = mdss_mdp_is_lm_swap_needed(mdata, ctl);

		ATRACE_BEGIN("prepare_fnc");
		if (ctl->ops.prepare_fnc)
			ret = ctl->ops.prepare_fnc(ctl, arg);
		ATRACE_END("prepare_fnc");
		if (ret) {
			pr_err("error preparing display\n");
			mutex_unlock(&ctl->flush_lock);
			goto done;
		}

		ATRACE_BEGIN("mixer_programming");
		mdss_mdp_ctl_perf_update(ctl, 1, false);

		mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_LEFT, lm_swap);
		mdss_mdp_mixer_setup(ctl, MDSS_MDP_MIXER_MUX_RIGHT, lm_swap);

		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, ctl->opmode);
		ctl->flush_bits |= BIT(17);	/* CTL */

		if (sctl) {
			mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP,
					sctl->opmode);
			sctl->flush_bits |= BIT(17);
			sctl_flush_bits = sctl->flush_bits;
		}
		ATRACE_END("mixer_programming");
	}

	/*
	 * With partial frame update, enable split display bit only
	 * when validity of ROI's on both the DSI's are identical.
	 */
	if (sctl)
		mdss_mdp_ctl_split_display_enable(split_lm_valid, ctl, sctl);

	ATRACE_BEGIN("postproc_programming");
	if (ctl->is_video_mode && ctl->mfd && ctl->mfd->dcm_state != DTM_ENTER)
		/* postprocessing setup, including dspp */
		mdss_mdp_pp_setup_locked(ctl);

	/* fold split-ctl flush bits into the main ctl when HW split flush */
	if (sctl) {
		if (ctl->split_flush_en) {
			ctl->flush_bits |= sctl->flush_bits;
			sctl->flush_bits = 0;
			sctl_flush_bits = 0;
		} else {
			sctl_flush_bits = sctl->flush_bits;
		}
	}
	ctl_flush_bits = ctl->flush_bits;

	ATRACE_END("postproc_programming");

	mutex_unlock(&ctl->flush_lock);

	ATRACE_BEGIN("frame_ready");
	mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_CFG_DONE);
	if (commit_cb)
		commit_cb->commit_cb_fnc(
			MDP_COMMIT_STAGE_SETUP_DONE,
			commit_cb->data);
	ret = mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);

	/*
	 * When wait for fence timed out, driver ignores the fences
	 * for signalling. Hardware needs to access only on the buffers
	 * that are valid and driver needs to ensure it. This function
	 * would set the mixer state to border when there is timeout.
	 */
	if (ret == NOTIFY_BAD) {
		mdss_mdp_force_border_color(ctl);
		ctl_flush_bits |= (ctl->flush_bits | BIT(17));
		if (sctl && (!ctl->split_flush_en))
			sctl_flush_bits |= (sctl->flush_bits | BIT(17));
		ret = 0;
	}

	ATRACE_END("frame_ready");

	if (ctl->ops.wait_pingpong && !mdata->serialize_wait4pp)
		mdss_mdp_display_wait4pingpong(ctl, false);

	/* Moved pp programming to post ping pong */
	if (!ctl->is_video_mode && ctl->mfd &&
	    ctl->mfd->dcm_state != DTM_ENTER) {
		/* postprocessing setup, including dspp */
		mutex_lock(&ctl->flush_lock);
		mdss_mdp_pp_setup_locked(ctl);
		if (sctl) {
			if (ctl->split_flush_en) {
				ctl->flush_bits |= sctl->flush_bits;
				sctl->flush_bits = 0;
				sctl_flush_bits = 0;
			} else {
				sctl_flush_bits |= sctl->flush_bits;
			}
		}
		ctl_flush_bits |= ctl->flush_bits;
		mutex_unlock(&ctl->flush_lock);
	}
	/*
	 * if serialize_wait4pp is false then roi_bkup used in wait4pingpong
	 * will be of previous frame as expected.
	 */
	ctl->roi_bkup.w = ctl->roi.w;
	ctl->roi_bkup.h = ctl->roi.h;

	/*
	 * update roi of panel_info which will be
	 * used by dsi to set col_page addr of panel.
	 */
	if (ctl->panel_data &&
	    ctl->panel_data->panel_info.partial_update_enabled) {

		if (is_pingpong_split(ctl->mfd)) {
			bool pp_split = false;
			struct mdss_rect l_roi, r_roi, temp = {0};
			u32 opmode = mdss_mdp_ctl_read(ctl,
				MDSS_MDP_REG_CTL_TOP) & ~0xF0; /* clear OUT_SEL */
			/*
			 * with pp split enabled, it is a requirement that both
			 * panels share equal load, so split-point is center.
			 */
			u32 left_panel_w = left_lm_w_from_mfd(ctl->mfd) / 2;

			mdss_rect_split(&ctl->roi, &l_roi, &r_roi,
				left_panel_w);

			/*
			 * If update is only on left panel then we still send
			 * zeroed out right panel ROIs to DSI driver. Based on
			 * zeroed ROI, DSI driver identifies which panel is not
			 * transmitting.
			 */
			ctl->panel_data->panel_info.roi = l_roi;
			ctl->panel_data->next->panel_info.roi = r_roi;

			/* based on the roi, update ctl topology */
			if (!mdss_rect_cmp(&temp, &l_roi) &&
			    !mdss_rect_cmp(&temp, &r_roi)) {
				/* left + right */
				opmode |= (ctl->intf_num << 4);
				pp_split = true;
			} else if (mdss_rect_cmp(&temp, &l_roi)) {
				/* right only */
				opmode |= (ctl->slave_intf_num << 4);
				pp_split = false;
			} else {
				/* left only */
				opmode |= (ctl->intf_num << 4);
				pp_split = false;
			}

			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);

			mdss_mdp_ctl_pp_split_display_enable(pp_split, ctl);
		} else {
			/*
			 * if single lm update on 3D mux topology, clear it.
			 */
			if ((is_dual_lm_single_display(ctl->mfd)) &&
			    (ctl->opmode & MDSS_MDP_CTL_OP_PACK_3D_ENABLE) &&
			    (!mdss_mdp_is_both_lm_valid(ctl))) {

				u32 opmode = mdss_mdp_ctl_read(ctl,
					MDSS_MDP_REG_CTL_TOP);
				opmode &= ~(0xF << 19);	/* clear 3D Mux */

				mdss_mdp_ctl_write(ctl,
					MDSS_MDP_REG_CTL_TOP, opmode);
			}

			ctl->panel_data->panel_info.roi = ctl->roi;
			if (sctl && sctl->panel_data)
				sctl->panel_data->panel_info.roi = sctl->roi;
		}
	}

	if (commit_cb)
		commit_cb->commit_cb_fnc(MDP_COMMIT_STAGE_READY_FOR_KICKOFF,
			commit_cb->data);

	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC) &&
	    !bitmap_empty(mdata->bwc_enable_map, MAX_DRV_SUP_PIPES))
		mdss_mdp_bwcpanic_ctrl(mdata, true);

	ATRACE_BEGIN("flush_kickoff");

	MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
		mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_FLUSH), split_lm_valid);

	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush_bits);
	if (sctl && sctl_flush_bits) {
		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
			sctl_flush_bits);
		sctl->flush_bits = 0;
	}

	MDSS_XLOG(ctl->intf_num, ctl_flush_bits, sctl_flush_bits,
		split_lm_valid);
	wmb(); /* ensure write is finished before progressing */
	ctl->flush_reg_data = ctl_flush_bits;
	ctl->flush_bits = 0;

	mdss_mdp_mixer_update_pipe_map(ctl, MDSS_MDP_MIXER_MUX_LEFT);
	mdss_mdp_mixer_update_pipe_map(ctl, MDSS_MDP_MIXER_MUX_RIGHT);

	/* right-only kickoff */
	if (!ctl->mixer_left->valid_roi &&
	    sctl && sctl->mixer_left->valid_roi) {
		/*
		 * Separate kickoff on DSI1 is needed only when we have
		 * ONLY right half updating on a dual DSI panel
		 */
		if (sctl->ops.display_fnc)
			ret = sctl->ops.display_fnc(sctl, arg);
	} else {
		if (ctl->ops.display_fnc)
			ret = ctl->ops.display_fnc(ctl, arg); /* DSI0 kickoff */
	}

	if (ret)
		pr_warn("ctl %d error displaying frame\n", ctl->num);

	/* update backlight in commit */
	if (ctl->intf_type == MDSS_INTF_DSI && !ctl->is_video_mode &&
	    ctl->mfd && ctl->mfd->bl_extn_level > 0) {
		if (!IS_CALIB_MODE_BL(ctl->mfd) && (!ctl->mfd->ext_bl_ctrl ||
			!ctl->mfd->bl_level)) {
			mutex_lock(&ctl->mfd->bl_lock);
			mdss_fb_set_backlight(ctl->mfd,
				ctl->mfd->bl_extn_level);
			mutex_unlock(&ctl->mfd->bl_lock);
		}
	}

	ctl->play_cnt++;
	ATRACE_END("flush_kickoff");

done:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);

	mutex_unlock(&ctl->lock);

	return ret;
}
5844
5845void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
5846 struct notifier_block *notifier)
5847{
5848 struct mdss_mdp_ctl *sctl;
5849
5850 blocking_notifier_chain_register(&ctl->notifier_head, notifier);
5851
5852 sctl = mdss_mdp_get_split_ctl(ctl);
5853 if (sctl)
5854 blocking_notifier_chain_register(&sctl->notifier_head,
5855 notifier);
5856}
5857
5858void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
5859 struct notifier_block *notifier)
5860{
5861 struct mdss_mdp_ctl *sctl;
5862
5863 blocking_notifier_chain_unregister(&ctl->notifier_head, notifier);
5864
5865 sctl = mdss_mdp_get_split_ctl(ctl);
5866 if (sctl)
5867 blocking_notifier_chain_unregister(&sctl->notifier_head,
5868 notifier);
5869}
5870
/* Broadcast @event to every notifier registered on @ctl's chain. */
int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event)
{
	return blocking_notifier_call_chain(&ctl->notifier_head, event, ctl);
}
5875
5876int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id)
5877{
5878 int i;
5879 struct mdss_mdp_ctl *ctl;
5880 struct mdss_data_type *mdata;
5881 u32 mixer_cnt = 0;
5882
5883 mutex_lock(&mdss_mdp_ctl_lock);
5884 mdata = mdss_mdp_get_mdata();
5885 for (i = 0; i < mdata->nctl; i++) {
5886 ctl = mdata->ctl_off + i;
5887 if ((mdss_mdp_ctl_is_power_on(ctl)) && (ctl->mfd) &&
5888 (ctl->mfd->index == fb_num)) {
5889 if (ctl->mixer_left) {
5890 mixer_id[mixer_cnt] = ctl->mixer_left->num;
5891 mixer_cnt++;
5892 }
5893 if (mixer_cnt && ctl->mixer_right) {
5894 mixer_id[mixer_cnt] = ctl->mixer_right->num;
5895 mixer_cnt++;
5896 }
5897 if (mixer_cnt)
5898 break;
5899 }
5900 }
5901 mutex_unlock(&mdss_mdp_ctl_lock);
5902 return mixer_cnt;
5903}
5904
5905/**
5906 * @mdss_mdp_ctl_mixer_switch() - return ctl mixer of @return_type
5907 * @ctl: Pointer to ctl structure to be switched.
5908 * @return_type: wb_type of the ctl to be switched to.
5909 *
5910 * Virtual mixer switch should be performed only when there is no
5911 * dedicated wfd block and writeback block is shared.
5912 */
5913struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
5914 u32 return_type)
5915{
5916 int i;
5917 struct mdss_data_type *mdata = ctl->mdata;
5918
5919 if (ctl->wb_type == return_type) {
5920 mdata->mixer_switched = false;
5921 return ctl;
5922 }
5923 for (i = 0; i <= mdata->nctl; i++) {
5924 if (mdata->ctl_off[i].wb_type == return_type) {
5925 pr_debug("switching mixer from ctl=%d to ctl=%d\n",
5926 ctl->num, mdata->ctl_off[i].num);
5927 mdata->mixer_switched = true;
5928 return mdata->ctl_off + i;
5929 }
5930 }
5931 pr_err("unable to switch mixer to type=%d\n", return_type);
5932 return NULL;
5933}
5934
/*
 * Stage a handed-off pipe on @mixer at MDSS_MDP_STAGE_UNUSED. On HW
 * rev >= 1.03 up to two pipes may share that stage (left and right
 * blend slots); older hardware allows only one. Returns 0 on success,
 * -EINVAL when @mixer is NULL or the stage slots are already full.
 */
static int __mdss_mdp_mixer_handoff_helper(struct mdss_mdp_mixer *mixer,
	struct mdss_mdp_pipe *pipe)
{
	int rc = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 right_blend = 0;

	if (!mixer) {
		rc = -EINVAL;
		goto error;
	}

	/*
	 * It is possible to have more the one pipe staged on a single
	 * layer mixer at same staging level.
	 */
	if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] != NULL) {
		if (mdata->mdp_rev < MDSS_MDP_HW_REV_103) {
			pr_err("More than one pipe staged on mixer num %d\n",
				mixer->num);
			rc = -EINVAL;
			goto error;
		} else if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED + 1] !=
			NULL) {
			pr_err("More than two pipe staged on mixer num %d\n",
				mixer->num);
			rc = -EINVAL;
			goto error;
		} else {
			/* left slot taken: fall back to the right slot */
			right_blend = 1;
		}
	}

	pr_debug("Staging pipe num %d on mixer num %d\n",
		pipe->num, mixer->num);
	mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED + right_blend] = pipe;
	pipe->mixer_left = mixer;
	pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;

error:
	return rc;
}
5977
5978/**
5979 * mdss_mdp_mixer_handoff() - Stages a given pipe on the appropriate mixer
5980 * @ctl: pointer to the control structure associated with the overlay device.
5981 * @num: the mixer number on which the pipe needs to be staged.
5982 * @pipe: pointer to the pipe to be staged.
5983 *
5984 * Function stages a given pipe on either the left mixer or the right mixer
5985 * for the control structre based on the mixer number. If the input mixer
5986 * number does not match either of the mixers then an error is returned.
5987 * This function is called during overlay handoff when certain pipes are
5988 * already staged by the bootloader.
5989 */
5990int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
5991 struct mdss_mdp_pipe *pipe)
5992{
5993 int rc = 0;
5994 struct mdss_mdp_mixer *mx_left = ctl->mixer_left;
5995 struct mdss_mdp_mixer *mx_right = ctl->mixer_right;
5996
5997 /*
5998 * For performance calculations, stage the handed off pipe
5999 * as MDSS_MDP_STAGE_UNUSED
6000 */
6001 if (mx_left && (mx_left->num == num)) {
6002 rc = __mdss_mdp_mixer_handoff_helper(mx_left, pipe);
6003 } else if (mx_right && (mx_right->num == num)) {
6004 rc = __mdss_mdp_mixer_handoff_helper(mx_right, pipe);
6005 } else {
6006 pr_err("pipe num %d staged on unallocated mixer num %d\n",
6007 pipe->num, num);
6008 rc = -EINVAL;
6009 }
6010
6011 return rc;
6012}
6013
/**
 * mdss_mdp_wb_alloc() - claim a free writeback block with given caps
 * @caps: capability mask the block must intersect (MDSS_MDP_WB_*)
 * @reg_index: register-block index used when writebacks are "virtual"
 *	       (one register block per ctl, i.e. nctl == nwb_offsets)
 *
 * Scans the writeback table for an unreferenced entry matching @caps,
 * takes a kref on it and maps its register base. Return: the claimed
 * block, or NULL if none is free or @reg_index is out of range.
 */
struct mdss_mdp_writeback *mdss_mdp_wb_alloc(u32 caps, u32 reg_index)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_writeback *wb = NULL;
	int i;
	bool wb_virtual_on;

	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);

	if (wb_virtual_on && reg_index >= mdata->nwb_offsets)
		return NULL;

	mutex_lock(&mdata->wb_lock);

	/* find the first free (refcount 0) block whose caps match */
	for (i = 0; i < mdata->nwb; i++) {
		wb = mdata->wb + i;
		if ((wb->caps & caps) &&
			(atomic_read(&wb->kref.refcount) == 0)) {
			kref_init(&wb->kref);
			break;
		}
		wb = NULL;
	}
	mutex_unlock(&mdata->wb_lock);

	if (wb) {
		wb->base = mdata->mdss_io.base;
		if (wb_virtual_on)
			wb->base += mdata->wb_offsets[reg_index];
		else
			wb->base += mdata->wb_offsets[i];
	}

	return wb;
}
6049
6050bool mdss_mdp_is_wb_mdp_intf(u32 num, u32 reg_index)
6051{
6052 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
6053 struct mdss_mdp_writeback *wb = NULL;
6054 bool wb_virtual_on;
6055
6056 wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);
6057
6058 if (num >= mdata->nwb || (wb_virtual_on && reg_index >=
6059 mdata->nwb_offsets))
6060 return false;
6061
6062 wb = mdata->wb + num;
6063 if (!wb)
6064 return false;
6065
6066 return (wb->caps & MDSS_MDP_WB_INTF) ? true : false;
6067}
6068
/**
 * mdss_mdp_wb_assign() - claim a specific writeback block by index
 * @num: writeback block index to claim
 * @reg_index: register-block index used in virtual-wb mode
 *	       (nctl == nwb_offsets)
 *
 * Takes a kref on writeback @num if it is currently unreferenced and
 * maps its register base. Return: the claimed block, or NULL when the
 * index is out of range or the block is already in use.
 */
struct mdss_mdp_writeback *mdss_mdp_wb_assign(u32 num, u32 reg_index)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_writeback *wb = NULL;
	bool wb_virtual_on;

	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);

	if (num >= mdata->nwb)
		return NULL;

	if (wb_virtual_on && reg_index >= mdata->nwb_offsets)
		return NULL;

	mutex_lock(&mdata->wb_lock);
	wb = mdata->wb + num;
	/* claim only if no one else holds a reference */
	if (atomic_read(&wb->kref.refcount) == 0)
		kref_init(&wb->kref);
	else
		wb = NULL;
	mutex_unlock(&mdata->wb_lock);

	if (!wb)
		return NULL;

	wb->base = mdata->mdss_io.base;
	if (wb_virtual_on)
		wb->base += mdata->wb_offsets[reg_index];
	else
		wb->base += mdata->wb_offsets[num];

	return wb;
}
6102
6103static void mdss_mdp_wb_release(struct kref *kref)
6104{
6105 struct mdss_mdp_writeback *wb =
6106 container_of(kref, struct mdss_mdp_writeback, kref);
6107
6108 if (!wb)
6109 return;
6110
6111 wb->base = NULL;
6112}
6113
/*
 * Drop a reference on @wb under wb_lock; when the count reaches zero,
 * mdss_mdp_wb_release() runs (clearing wb->base) and the lock is
 * released here, per the kref_put_mutex() contract.
 */
void mdss_mdp_wb_free(struct mdss_mdp_writeback *wb)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (kref_put_mutex(&wb->kref, mdss_mdp_wb_release,
				&mdata->wb_lock))
		mutex_unlock(&mdata->wb_lock);
}
6121}