blob: 8c8d28ef5b0bcb357f5f0264bdfd2e575cac5f3d [file] [log] [blame]
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301/*
2 * MDSS MDP Interface (used by framebuffer core)
3 *
4 * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
5 * Copyright (C) 2007 Google Incorporated
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#define pr_fmt(fmt) "%s: " fmt, __func__
18
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/rpm-smd-regulator.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/semaphore.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/clk/msm-clk.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>

#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/rpm-smd.h>
50
51#include "mdss.h"
52#include "mdss_fb.h"
53#include "mdss_mdp.h"
54#include "mdss_panel.h"
55#include "mdss_debug.h"
56#include "mdss_mdp_debug.h"
57#include "mdss_smmu.h"
58
59#include "mdss_mdp_trace.h"
60
/* Poll timeout (us) while waiting for an AXI bus halt to take effect. */
#define AXI_HALT_TIMEOUT_US 0x4000
/* Runtime-PM autosuspend delay for the MDP device. */
#define AUTOSUSPEND_TIMEOUT_MS 200
/* Default max source-pipe width when DT does not provide one. */
#define DEFAULT_MDP_PIPE_WIDTH 2048
/* Pixel-count thresholds for resolution-dependent tuning. */
#define RES_1080p (1088*1920)
#define RES_UHD (3840*2160)

/* Global MDSS driver state shared by the MDSS sub-drivers. */
struct mdss_data_type *mdss_res;
/* SCM control id for secure-display memory protection
 * (presumably one of MEM_PROTECT_SD_CTRL / _FLAT below — set elsewhere).
 */
static u32 mem_protect_sd_ctrl_id;
69
/*
 * mdss_fb_mem_get_iommu_domain() - SMMU domain id for framebuffer memory
 *
 * Thin wrapper handed to the framebuffer core through struct
 * msm_mdp_interface; always reports the unsecure MDSS domain.
 */
static int mdss_fb_mem_get_iommu_domain(void)
{
	return mdss_smmu_get_domain_id(MDSS_IOMMU_DOMAIN_UNSECURE);
}
74
/*
 * MDP5 entry points exported to the framebuffer core: overlay init,
 * iommu domain lookup, fb stride calculation, DSI status check and
 * pixel-format parameter lookup.
 */
struct msm_mdp_interface mdp5 = {
	.init_fnc = mdss_mdp_overlay_init,
	.fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
	.fb_stride = mdss_mdp_fb_stride,
	.check_dsi_status = mdss_check_dsi_ctrl_status,
	.get_format_params = mdss_mdp_get_format_params,
};
82
/* Initial AB/IB bandwidth quota recorded at bus-client registration. */
#define IB_QUOTA 2000000000
#define AB_QUOTA 2000000000

/* Maximum number of AXI ports the display can be wired to. */
#define MAX_AXI_PORT_COUNT 3

/* SCM command ids for secure-display memory protection. */
#define MEM_PROTECT_SD_CTRL 0xF
#define MEM_PROTECT_SD_CTRL_FLAT 0x14

/* Guards MDP irq enable masks and shared irq on/off transitions. */
static DEFINE_SPINLOCK(mdp_lock);
/* Guards the mdp_intr_cb callback table. */
static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
static DEFINE_MUTEX(mdp_clk_lock);
static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock);
static DEFINE_MUTEX(mdp_fs_idle_pc_lock);

/* Panel interface names as they appear in the panel selection string. */
static struct mdss_panel_intf pan_types[] = {
	{"dsi", MDSS_PANEL_INTF_DSI},
	{"edp", MDSS_PANEL_INTF_EDP},
	{"hdmi", MDSS_PANEL_INTF_HDMI},
};
/* Panel selection string (populated elsewhere, not visible here). */
static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];
103
/* MDP block registration with the top-level MDSS irq dispatcher. */
struct mdss_hw mdss_mdp_hw = {
	.hw_ndx = MDSS_HW_MDP,
	.ptr = NULL,
	.irq_handler = mdss_mdp_isr,
};

/* define for h/w block with external driver */
struct mdss_hw mdss_misc_hw = {
	.hw_ndx = MDSS_HW_MISC,
	.ptr = NULL,
	/* no handler: misc bits are routed via the irq_domain instead */
	.irq_handler = NULL,
};
116
#ifdef CONFIG_QCOM_BUS_SCALING
/* One CPU -> display-config register-bus vector. */
#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
	{ \
		.src = MSM_BUS_MASTER_AMPSS_M0, \
		.dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
		.ab = (ab_val), \
		.ib = (ib_val), \
	}

/* Register-bus IB vote levels (bytes/sec). */
#define BUS_VOTE_19_MHZ 153600000
#define BUS_VOTE_40_MHZ 320000000
#define BUS_VOTE_80_MHZ 640000000

/* Usecase 0 is "off"; 1..3 are increasing register-bus vote levels. */
static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
};
static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
		mdp_reg_bus_vectors)];
static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
	.usecase = mdp_reg_bus_usecases,
	.num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
	.name = "mdss_reg",
	.active_only = true,
};
#endif
145
/* Writeback output formats that MDP 1.0.7 hardware cannot produce. */
u32 invalid_mdp107_wb_output_fmts[] = {
	MDP_XRGB_8888,
	MDP_RGBX_8888,
	MDP_BGRX_8888,
};
151
/*
 * struct intr_callback - per-interrupt handler registration
 * @func: intr handler, invoked from mdss_mdp_isr()
 * @arg: requested argument to the handler
 */
struct intr_callback {
	void (*func)(void *);
	void *arg;
};

/*
 * struct mdss_mdp_intr_reg - one MDP interrupt register set
 * @clr_off: offset to CLEAR reg
 * @en_off: offset to ENABLE reg
 * @status_off: offset to STATUS reg
 */
struct mdss_mdp_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * struct mdss_mdp_irq - maps each irq with i/f
 * @intr_type: type of interface
 * @intf_num: i/f the irq is associated with
 * @irq_mask: corresponding bit in the reg set
 * @reg_idx: which reg set (index into mdp_intr_reg) to program
 */
struct mdss_mdp_irq {
	u32 intr_type;
	u32 intf_num;
	u32 irq_mask;
	u32 reg_idx;
};
187
188static struct mdss_mdp_intr_reg mdp_intr_reg[] = {
189 { MDSS_MDP_REG_INTR_CLEAR, MDSS_MDP_REG_INTR_EN,
190 MDSS_MDP_REG_INTR_STATUS },
191 { MDSS_MDP_REG_INTR2_CLEAR, MDSS_MDP_REG_INTR2_EN,
192 MDSS_MDP_REG_INTR2_STATUS }
193};
194
195static struct mdss_mdp_irq mdp_irq_map[] = {
196 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 1,
197 MDSS_MDP_INTR_INTF_0_UNDERRUN, 0},
198 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 2,
199 MDSS_MDP_INTR_INTF_1_UNDERRUN, 0},
200 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 3,
201 MDSS_MDP_INTR_INTF_2_UNDERRUN, 0},
202 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 4,
203 MDSS_MDP_INTR_INTF_3_UNDERRUN, 0},
204 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 1,
205 MDSS_MDP_INTR_INTF_0_VSYNC, 0},
206 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 2,
207 MDSS_MDP_INTR_INTF_1_VSYNC, 0},
208 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 3,
209 MDSS_MDP_INTR_INTF_2_VSYNC, 0},
210 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 4,
211 MDSS_MDP_INTR_INTF_3_VSYNC, 0},
212 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 0,
213 MDSS_MDP_INTR_PING_PONG_0_DONE, 0},
214 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 1,
215 MDSS_MDP_INTR_PING_PONG_1_DONE, 0},
216 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 2,
217 MDSS_MDP_INTR_PING_PONG_2_DONE, 0},
218 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 3,
219 MDSS_MDP_INTR_PING_PONG_3_DONE, 0},
220 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 0,
221 MDSS_MDP_INTR_PING_PONG_0_RD_PTR, 0},
222 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 1,
223 MDSS_MDP_INTR_PING_PONG_1_RD_PTR, 0},
224 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 2,
225 MDSS_MDP_INTR_PING_PONG_2_RD_PTR, 0},
226 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 3,
227 MDSS_MDP_INTR_PING_PONG_3_RD_PTR, 0},
228 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 0,
229 MDSS_MDP_INTR_PING_PONG_0_WR_PTR, 0},
230 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 1,
231 MDSS_MDP_INTR_PING_PONG_1_WR_PTR, 0},
232 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 2,
233 MDSS_MDP_INTR_PING_PONG_2_WR_PTR, 0},
234 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 3,
235 MDSS_MDP_INTR_PING_PONG_3_WR_PTR, 0},
236 { MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 0,
237 MDSS_MDP_INTR_WB_0_DONE, 0},
238 { MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 1,
239 MDSS_MDP_INTR_WB_1_DONE, 0},
240 { MDSS_MDP_IRQ_TYPE_WB_WFD_COMP, 0,
241 MDSS_MDP_INTR_WB_2_DONE, 0},
242 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 0,
243 MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
244 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 1,
245 MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
246 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 2,
247 MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
248 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 3,
249 MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
250 { MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 2,
251 MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1},
252 { MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3,
253 MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1}
254};
255
/* Callback table indexed like mdp_irq_map; allocated during probe. */
static struct intr_callback *mdp_intr_cb;

/* Forward declarations for DT parsing and power-control helpers. */
static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
static int mdss_mdp_parse_dt(struct platform_device *pdev);
static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
static int mdss_mdp_parse_dt_wb(struct platform_device *pdev);
static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev);
static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev);
static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
		char *prop_name, u32 *offsets, int len);
static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
		char *prop_name);
static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev);
static int mdss_mdp_parse_dt_misc(struct platform_device *pdev);
static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev);
static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev);
static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev);
static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev);
static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev);
277
278static inline u32 is_mdp_irq_enabled(void)
279{
280 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
281 int i;
282
283 for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++)
284 if (mdata->mdp_irq_mask[i] != 0)
285 return 1;
286
287 if (mdata->mdp_hist_irq_mask)
288 return 1;
289
290 if (mdata->mdp_intf_irq_mask)
291 return 1;
292
293 return 0;
294}
295
296u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
297{
298 /* The adreno GPU hardware requires that the pitch be aligned to
299 * 32 pixels for color buffers, so for the cases where the GPU
300 * is writing directly to fb0, the framebuffer pitch
301 * also needs to be 32 pixel aligned
302 */
303
304 if (fb_index == 0)
305 return ALIGN(xres, 32) * bpp;
306 else
307 return xres * bpp;
308}
309
310static void mdss_irq_mask(struct irq_data *data)
311{
312 struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
313 unsigned long irq_flags;
314
315 if (!mdata)
316 return;
317
318 pr_debug("irq_domain_mask %lu\n", data->hwirq);
319
320 if (data->hwirq < 32) {
321 spin_lock_irqsave(&mdp_lock, irq_flags);
322 mdata->mdss_util->disable_irq(&mdss_misc_hw);
323 spin_unlock_irqrestore(&mdp_lock, irq_flags);
324 }
325}
326
327static void mdss_irq_unmask(struct irq_data *data)
328{
329 struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
330 unsigned long irq_flags;
331
332 if (!mdata)
333 return;
334
335 pr_debug("irq_domain_unmask %lu\n", data->hwirq);
336
337 if (data->hwirq < 32) {
338 spin_lock_irqsave(&mdp_lock, irq_flags);
339 mdata->mdss_util->enable_irq(&mdss_misc_hw);
340 spin_unlock_irqrestore(&mdp_lock, irq_flags);
341 }
342}
343
/* irq_chip backing the MDSS misc interrupt domain. */
static struct irq_chip mdss_irq_chip = {
	.name = "mdss",
	.irq_mask = mdss_irq_mask,
	.irq_unmask = mdss_irq_unmask,
};

/*
 * Map a hwirq into the MDSS misc domain: level-triggered handling with
 * mdata as the chip data for the mask/unmask hooks.
 */
static int mdss_irq_domain_map(struct irq_domain *d,
		unsigned int virq, irq_hw_number_t hw)
{
	struct mdss_data_type *mdata = d->host_data;
	/* check here if virq is a valid interrupt line */
	irq_set_chip_and_handler(virq, &mdss_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, mdata);
	return 0;
}

const struct irq_domain_ops mdss_irq_domain_ops = {
	.map = mdss_irq_domain_map,
	.xlate = irq_domain_xlate_onecell,
};
364
/*
 * mdss_irq_handler() - top-level MDSS interrupt dispatcher
 * @irq: interrupt number
 * @ptr: mdss_data_type registered at request_irq time
 *
 * Reads the MDSS-level status register and dispatches each asserted
 * source (MDP, DSI0/1, EDP, HDMI) to the registered sub-handler; only
 * the MDP dispatch is serialized with mdp_lock. Any leftover bits are
 * forwarded to external drivers through the misc irq_domain.
 */
static irqreturn_t mdss_irq_handler(int irq, void *ptr)
{
	struct mdss_data_type *mdata = ptr;
	u32 intr;

	if (!mdata)
		return IRQ_NONE;
	else if (!mdss_get_irq_enable_state(&mdss_mdp_hw))
		return IRQ_HANDLED;

	intr = MDSS_REG_READ(mdata, MDSS_REG_HW_INTR_STATUS);

	mdss_mdp_hw.irq_info->irq_buzy = true;

	if (intr & MDSS_INTR_MDP) {
		spin_lock(&mdp_lock);
		mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr);
		spin_unlock(&mdp_lock);
		intr &= ~MDSS_INTR_MDP;
	}

	if (intr & MDSS_INTR_DSI0) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI0, irq, ptr);
		intr &= ~MDSS_INTR_DSI0;
	}

	if (intr & MDSS_INTR_DSI1) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_DSI1, irq, ptr);
		intr &= ~MDSS_INTR_DSI1;
	}

	if (intr & MDSS_INTR_EDP) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_EDP, irq, ptr);
		intr &= ~MDSS_INTR_EDP;
	}

	if (intr & MDSS_INTR_HDMI) {
		mdata->mdss_util->irq_dispatch(MDSS_HW_HDMI, irq, ptr);
		intr &= ~MDSS_INTR_HDMI;
	}

	/* route misc. interrupts to external drivers */
	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_irq(irq_find_mapping(
				mdata->irq_domain, hwirq));
		intr &= ~(1 << hwirq);
	}

	mdss_mdp_hw.irq_info->irq_buzy = false;

	return IRQ_HANDLED;
}
419
#ifdef CONFIG_QCOM_BUS_SCALING
/*
 * mdss_mdp_bus_scale_register() - register MDSS bus-scaling clients
 * @mdata: MDSS driver data
 *
 * Registers the data bus client (table parsed from DT), the register
 * bus client (static table above) and, if described in DT, the hw-rt
 * bus client. Register-bus and hw-rt registration failures are
 * tolerated; the data bus client is mandatory. Finishes by recording
 * an initial AB/IB quota (see comment at the bottom).
 *
 * Return: 0 on success, negative errno on DT or mandatory registration
 * failure.
 */
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
	struct msm_bus_scale_pdata *reg_bus_pdata;
	int i, rc;

	if (!mdata->bus_hdl) {
		rc = mdss_mdp_parse_dt_bus_scale(mdata->pdev);
		if (rc) {
			pr_err("Error in device tree : bus scale\n");
			return rc;
		}

		mdata->bus_hdl =
			msm_bus_scale_register_client(mdata->bus_scale_table);
		if (!mdata->bus_hdl) {
			pr_err("bus_client register failed\n");
			return -EINVAL;
		}

		pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
	}

	if (!mdata->reg_bus_scale_table) {
		/* hook the static vectors into the static usecase table */
		reg_bus_pdata = &mdp_reg_bus_scale_table;
		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
			mdp_reg_bus_usecases[i].num_paths = 1;
			mdp_reg_bus_usecases[i].vectors =
				&mdp_reg_bus_vectors[i];
		}
		mdata->reg_bus_scale_table = reg_bus_pdata;
	}

	if (!mdata->reg_bus_hdl) {
		mdata->reg_bus_hdl =
			msm_bus_scale_register_client(
					mdata->reg_bus_scale_table);
		if (!mdata->reg_bus_hdl)
			/* Continue without reg_bus scaling */
			pr_warn("reg_bus_client register failed\n");
		else
			pr_debug("register reg_bus_hdl=%x\n",
					mdata->reg_bus_hdl);
	}

	if (mdata->hw_rt_bus_scale_table && !mdata->hw_rt_bus_hdl) {
		mdata->hw_rt_bus_hdl =
			msm_bus_scale_register_client(
					mdata->hw_rt_bus_scale_table);
		if (!mdata->hw_rt_bus_hdl)
			/* Continue without reg_bus scaling */
			pr_warn("hw_rt_bus client register failed\n");
		else
			pr_debug("register hw_rt_bus=%x\n",
					mdata->hw_rt_bus_hdl);
	}

	/*
	 * Following call will not result in actual vote rather update the
	 * current index and ab/ib value. When continuous splash is enabled,
	 * actual vote will happen when splash handoff is done.
	 */
	return mdss_bus_scale_set_quota(MDSS_MDP_RT, AB_QUOTA, IB_QUOTA);
}
484
485static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
486{
487 pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl);
488
489 if (mdata->bus_hdl)
490 msm_bus_scale_unregister_client(mdata->bus_hdl);
491
492 pr_debug("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
493
494 if (mdata->reg_bus_hdl) {
495 msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
496 mdata->reg_bus_hdl = 0;
497 }
498
499 if (mdata->hw_rt_bus_hdl) {
500 msm_bus_scale_unregister_client(mdata->hw_rt_bus_hdl);
501 mdata->hw_rt_bus_hdl = 0;
502 }
503}
504
/*
 * mdss_mdp_bus_scale_set_quota() - apply aggregated RT/NRT bandwidth votes
 * @ab_quota_rt:  average bandwidth for realtime clients
 * @ab_quota_nrt: average bandwidth for non-realtime clients
 * @ib_quota_rt:  instantaneous bandwidth for realtime clients
 * @ib_quota_nrt: instantaneous bandwidth for non-realtime clients
 *
 * Distributes the quotas across the AXI ports, skips voting when the
 * values already match the current usecase, and rotates through the
 * non-zero usecase slots so msm_bus always sees a changed index.
 *
 * Caller needs to hold mdata->bus_lock lock before calling this function.
 *
 * Return: 0 on success or skip, negative errno on bad state/input,
 * otherwise the msm_bus update result.
 */
static int mdss_mdp_bus_scale_set_quota(u64 ab_quota_rt, u64 ab_quota_nrt,
		u64 ib_quota_rt, u64 ib_quota_nrt)
{
	int new_uc_idx;
	/* remaining entries are implicitly zero-initialized */
	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	int rc;

	if (mdss_res->bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
		return -EINVAL;
	}

	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt) {
		/* an all-zero request maps to the dedicated "off" usecase */
		new_uc_idx = 0;
	} else {
		int i;
		struct msm_bus_vectors *vect = NULL;
		struct msm_bus_scale_pdata *bw_table =
			mdss_res->bus_scale_table;
		u32 nrt_axi_port_cnt = mdss_res->nrt_axi_port_cnt;
		u32 total_axi_port_cnt = mdss_res->axi_port_cnt;
		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
		int match_cnt = 0;

		if (!bw_table || !total_axi_port_cnt ||
		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
			pr_err("invalid input\n");
			return -EINVAL;
		}

		/* ib is requested per channel; split across bus channels */
		if (mdss_res->bus_channels) {
			ib_quota_rt = div_u64(ib_quota_rt,
						mdss_res->bus_channels);
			ib_quota_nrt = div_u64(ib_quota_nrt,
						mdss_res->bus_channels);
		}

		/*
		 * NOTE(review): if has_fixed_qos_arbiter_enabled is set while
		 * nrt_axi_port_cnt (or rt_axi_port_cnt) is 0, the divides
		 * below are div_u64(x, 0) — confirm DT guarantees non-zero
		 * counts on this path.
		 */
		if (mdss_res->has_fixed_qos_arbiter_enabled ||
			nrt_axi_port_cnt) {

			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);

			/* rt ports first, then the trailing nrt ports */
			for (i = 0; i < total_axi_port_cnt; i++) {
				if (i < rt_axi_port_cnt) {
					ab_quota[i] = ab_quota_rt;
					ib_quota[i] = ib_quota_rt;
				} else {
					ab_quota[i] = ab_quota_nrt;
					ib_quota[i] = ib_quota_nrt;
				}
			}
		} else {
			/* no dedicated nrt ports: spread evenly over all */
			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
					total_axi_port_cnt);
			ib_quota[0] = ib_quota_rt + ib_quota_nrt;

			for (i = 1; i < total_axi_port_cnt; i++) {
				ab_quota[i] = ab_quota[0];
				ib_quota[i] = ib_quota[0];
			}
		}

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase
				[mdss_res->curr_bw_uc_idx].vectors[i];
			/* avoid performing updates for small changes */
			if ((ab_quota[i] == vect->ab) &&
				(ib_quota[i] == vect->ib))
				match_cnt++;
		}

		if (match_cnt == total_axi_port_cnt) {
			pr_debug("skip BW vote\n");
			return 0;
		}

		/* rotate among usecases 1..N-1 so the index always changes */
		new_uc_idx = (mdss_res->curr_bw_uc_idx %
			(bw_table->num_usecases - 1)) + 1;

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase[new_uc_idx].vectors[i];
			vect->ab = ab_quota[i];
			vect->ib = ib_quota[i];

			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
				, i, vect->ab, vect->ib);
		}
	}
	mdss_res->curr_bw_uc_idx = new_uc_idx;
	mdss_res->ao_bw_uc_idx = new_uc_idx;

	/* defer the actual vote while no client holds a bus reference */
	if ((mdss_res->bus_ref_cnt == 0) && mdss_res->curr_bw_uc_idx) {
		rc = 0;
	} else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
		ATRACE_BEGIN("msm_bus_scale_req");
		rc = msm_bus_scale_client_update_request(mdss_res->bus_hdl,
			new_uc_idx);
		ATRACE_END("msm_bus_scale_req");
	}
	return rc;
}
612
613struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
614{
615 struct reg_bus_client *client;
616 static u32 id;
617
618 if (client_name == NULL) {
619 pr_err("client name is null\n");
620 return ERR_PTR(-EINVAL);
621 }
622
623 client = kcalloc(1, sizeof(struct reg_bus_client), GFP_KERNEL);
624 if (!client)
625 return ERR_PTR(-ENOMEM);
626
627 mutex_lock(&mdss_res->reg_bus_lock);
628 strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
629 client->usecase_ndx = VOTE_INDEX_DISABLE;
630 client->id = id;
631 pr_debug("bus vote client %s created:%pK id :%d\n", client_name,
632 client, id);
633 id++;
634 list_add(&client->list, &mdss_res->reg_bus_clist);
635 mutex_unlock(&mdss_res->reg_bus_lock);
636
637 return client;
638}
639
640void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
641{
642 if (!client) {
643 pr_err("reg bus vote: invalid client handle\n");
644 } else {
645 pr_debug("bus vote client %s destroyed:%pK id:%u\n",
646 client->name, client, client->id);
647 mutex_lock(&mdss_res->reg_bus_lock);
648 list_del_init(&client->list);
649 mutex_unlock(&mdss_res->reg_bus_lock);
650 kfree(client);
651 }
652}
653
/*
 * mdss_update_reg_bus_vote() - update one client's register-bus vote
 * @bus_client:  client created by mdss_reg_bus_vote_client_create()
 * @usecase_ndx: requested vote level (VOTE_INDEX_*)
 *
 * Records the client's request, recomputes the maximum vote over all
 * registered clients and, only when the aggregate changed, forwards
 * the new usecase index to msm_bus.
 *
 * Return: 0 when nothing changed or reg-bus scaling is unavailable,
 * otherwise the msm_bus update result.
 */
int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
{
	int ret = 0;
	bool changed = false;
	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
	struct reg_bus_client *client, *temp_client;

	if (!mdss_res || !mdss_res->reg_bus_hdl || !bus_client)
		return 0;

	mutex_lock(&mdss_res->reg_bus_lock);
	bus_client->usecase_ndx = usecase_ndx;
	list_for_each_entry_safe(client, temp_client, &mdss_res->reg_bus_clist,
		list) {

		if (client->usecase_ndx < VOTE_INDEX_MAX &&
		    client->usecase_ndx > max_usecase_ndx)
			max_usecase_ndx = client->usecase_ndx;
	}

	if (mdss_res->reg_bus_usecase_ndx != max_usecase_ndx) {
		changed = true;
		mdss_res->reg_bus_usecase_ndx = max_usecase_ndx;
	}

	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
		__builtin_return_address(0), changed, max_usecase_ndx,
		bus_client->name, bus_client->id, usecase_ndx);
	MDSS_XLOG(changed, max_usecase_ndx, bus_client->id, usecase_ndx);
	if (changed)
		ret = msm_bus_scale_client_update_request(mdss_res->reg_bus_hdl,
			max_usecase_ndx);

	mutex_unlock(&mdss_res->reg_bus_lock);
	return ret;
}
690
/*
 * mdss_bus_scale_set_quota() - record one client's bandwidth and revote
 * @client:   MDSS bus client id (index into the ab/ib arrays)
 * @ab_quota: average bandwidth request
 * @ib_quota: instantaneous bandwidth request
 *
 * Stores the per-client quota, aggregates across all clients (the NRT
 * client is tracked separately; RT ab is summed, RT ib is the max) and
 * applies the result via mdss_mdp_bus_scale_set_quota(), all under
 * bus_lock.
 */
int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
{
	int rc = 0;
	int i;
	u64 total_ab_rt = 0, total_ib_rt = 0;
	u64 total_ab_nrt = 0, total_ib_nrt = 0;

	mutex_lock(&mdss_res->bus_lock);

	mdss_res->ab[client] = ab_quota;
	mdss_res->ib[client] = ib_quota;
	trace_mdp_perf_update_bus(client, ab_quota, ib_quota);

	for (i = 0; i < MDSS_MAX_BUS_CLIENTS; i++) {
		if (i == MDSS_MDP_NRT) {
			total_ab_nrt = mdss_res->ab[i];
			total_ib_nrt = mdss_res->ib[i];
		} else {
			total_ab_rt += mdss_res->ab[i];
			total_ib_rt = max(total_ib_rt, mdss_res->ib[i]);
		}
	}

	rc = mdss_mdp_bus_scale_set_quota(total_ab_rt, total_ab_nrt,
			total_ib_rt, total_ib_nrt);

	mutex_unlock(&mdss_res->bus_lock);

	return rc;
}
#else
/* Stubs used when CONFIG_QCOM_BUS_SCALING is disabled. */
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
	return 0;
}

static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
{
}

int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
{
	pr_debug("No bus scaling! client=%d ab=%llu ib=%llu\n",
			client, ab_quota, ib_quota);

	return 0;
}

/* NULL handle: mdss_update_reg_bus_vote() treats it as "no voting". */
struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
{
	return NULL;
}

void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
{
}

int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
{
	pr_debug("%pS: No reg scaling! usecase=%u\n",
			__builtin_return_address(0), usecase_ndx);

	return 0;
}
#endif
756
757
758static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num)
759{
760 int i;
761
762 for (i = 0; i < ARRAY_SIZE(mdp_irq_map); i++) {
763 if (intr_type == mdp_irq_map[i].intr_type &&
764 intf_num == mdp_irq_map[i].intf_num)
765 return i;
766 }
767 return -EINVAL;
768}
769
770u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num)
771{
772 int idx = mdss_mdp_intr2index(intr_type, intf_num);
773
774 return (idx < 0) ? 0 : mdp_irq_map[idx].irq_mask;
775}
776
/* Turn on the shared MDSS hardware interrupt on behalf of MDP. */
void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata)
{
	mdata->mdss_util->enable_irq(&mdss_mdp_hw);
}

/* Turn it off again, but only once no MDP irq source remains armed. */
void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata)
{
	if (!is_mdp_irq_enabled())
		mdata->mdss_util->disable_irq(&mdss_mdp_hw);
}
787
/*
 * mdss_mdp_irq_clear() - clear a pending MDP interrupt status bit
 * @mdata:     MDSS driver data
 * @intr_type: MDP interrupt type
 * @intf_num:  interface/block instance number
 *
 * Writes the interrupt's mask bit to the CLEAR register under
 * mdp_lock. Function assumes that mdp is clocked to access hw
 * registers.
 */
void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
		u32 intr_type, u32 intf_num)
{
	unsigned long irq_flags;
	int irq_idx;
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	pr_debug("clearing mdp irq mask=%x\n", irq.irq_mask);
	spin_lock_irqsave(&mdp_lock, irq_flags);
	writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
811
/*
 * mdss_mdp_irq_enable() - enable one MDP interrupt source
 * @intr_type: MDP interrupt type
 * @intf_num:  interface/block instance number
 *
 * Under mdp_lock: clears stale status for the bit, adds it to the
 * per-register enable mask, programs the ENABLE register and makes
 * sure the shared MDSS hw interrupt is on.
 *
 * Return: 0 on success, -EINVAL for an unknown interrupt, -EBUSY when
 * the bit is already enabled.
 */
int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
{
	int irq_idx;
	unsigned long irq_flags;
	int ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return -EINVAL;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
		ret = -EBUSY;
	} else {
		pr_debug("MDP IRQ mask old=%x new=%x\n",
				mdata->mdp_irq_mask[irq.reg_idx], irq.irq_mask);
		mdata->mdp_irq_mask[irq.reg_idx] |= irq.irq_mask;
		/* ack any stale status before enabling the source */
		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
				mdata->mdp_base + reg.en_off);
		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);

	return ret;
}
/*
 * mdss_mdp_hist_irq_enable() - enable a histogram interrupt bit
 * @irq: bit mask within the HIST_INTR registers
 *
 * Clears stale status for @irq, adds it to the histogram enable mask
 * and ensures the shared MDSS hw interrupt is on.
 *
 * NOTE(review): unlike mdss_mdp_irq_enable(), the mask update here is
 * not done under mdp_lock — confirm callers serialize histogram
 * enable/disable.
 *
 * Return: 0 on success, -EBUSY when the bit is already enabled.
 */
int mdss_mdp_hist_irq_enable(u32 irq)
{
	int ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (mdata->mdp_hist_irq_mask & irq) {
		pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n",
				irq, mdata->mdp_hist_irq_mask);
		ret = -EBUSY;
	} else {
		pr_debug("mask old=%x new=%x\n",
				mdata->mdp_hist_irq_mask, irq);
		mdata->mdp_hist_irq_mask |= irq;
		writel_relaxed(irq, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_CLEAR);
		writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_EN);
		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
	}

	return ret;
}
870
/*
 * mdss_mdp_irq_disable() - disable one MDP interrupt source
 * @intr_type: MDP interrupt type
 * @intf_num:  interface/block instance number
 *
 * Under mdp_lock: removes the bit from the per-register enable mask,
 * reprograms the ENABLE register, and drops the shared MDSS hw
 * interrupt once no MDP source remains armed.
 */
void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num)
{
	int irq_idx;
	unsigned long irq_flags;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	spin_lock_irqsave(&mdp_lock, irq_flags);
	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
	} else {
		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
				mdata->mdp_base + reg.en_off);
		if (!is_mdp_irq_enabled())
			mdata->mdss_util->disable_irq(&mdss_mdp_hw);
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
901
/*
 * mdss_mdp_intr_check_and_clear() - ack an MDP interrupt if pending
 * @intr_type: MDP interrupt type
 * @intf_num:  interface/block instance number
 *
 * Reads the STATUS register under mdp_lock and, when the interrupt's
 * bit is asserted, writes it to the CLEAR register. Used to drop a
 * stale pending status without servicing it.
 */
void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num)
{
	u32 status;
	int irq_idx;
	unsigned long irq_flags;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	spin_lock_irqsave(&mdp_lock, irq_flags);
	status = irq.irq_mask & readl_relaxed(mdata->mdp_base +
			reg.status_off);
	if (status) {
		pr_debug("clearing irq: intr_type:%d, intf_num:%d\n",
				intr_type, intf_num);
		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
931
/*
 * mdss_mdp_hist_irq_disable() - disable a histogram interrupt bit
 * @irq: bit mask within the HIST_INTR registers
 *
 * Removes @irq from the histogram enable mask, reprograms the enable
 * register and drops the shared MDSS hw interrupt once nothing else
 * is armed. Like the enable path, not serialized by mdp_lock.
 */
void mdss_mdp_hist_irq_disable(u32 irq)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!(mdata->mdp_hist_irq_mask & irq)) {
		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
				irq, mdata->mdp_hist_irq_mask);
	} else {
		mdata->mdp_hist_irq_mask &= ~irq;
		writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_EN);
		if (!is_mdp_irq_enabled())
			mdata->mdss_util->disable_irq(&mdss_mdp_hw);
	}
}
947
/**
 * mdss_mdp_irq_disable_nosync() - disable mdp irq
 * @intr_type: mdp interface type
 * @intf_num: mdp interface num
 *
 * This function is called from interrupt context
 * mdp_lock is already held at up stream (mdss_irq_handler)
 * therefore spin_lock(&mdp_lock) is not allowed here
 *
 * Mirrors mdss_mdp_irq_disable() but uses the nosync irq-disable
 * helper and relies on the caller's locking.
 */
void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
{
	int irq_idx;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
	} else {
		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
				mdata->mdp_base + reg.en_off);
		if (!is_mdp_irq_enabled())
			mdata->mdss_util->disable_irq_nosync(&mdss_mdp_hw);
	}
}
985
986int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
987 void (*fnc_ptr)(void *), void *arg)
988{
989 unsigned long flags;
990 int index;
991
992 index = mdss_mdp_intr2index(intr_type, intf_num);
993 if (index < 0) {
994 pr_warn("invalid intr type=%u intf_numf_num=%u\n",
995 intr_type, intf_num);
996 return -EINVAL;
997 }
998
999 spin_lock_irqsave(&mdss_mdp_intr_lock, flags);
1000 WARN(mdp_intr_cb[index].func && fnc_ptr,
1001 "replacing current intr callback for ndx=%d\n", index);
1002 mdp_intr_cb[index].func = fnc_ptr;
1003 mdp_intr_cb[index].arg = arg;
1004 spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags);
1005
1006 return 0;
1007}
1008
1009int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
1010 void (*fnc_ptr)(void *), void *arg)
1011{
1012 int index;
1013
1014 index = mdss_mdp_intr2index(intr_type, intf_num);
1015 if (index < 0) {
1016 pr_warn("invalid intr Typee=%u intf_num=%u\n",
1017 intr_type, intf_num);
1018 return -EINVAL;
1019 }
1020
1021 WARN(mdp_intr_cb[index].func && fnc_ptr,
1022 "replacing current intr callbackack for ndx=%d\n",
1023 index);
1024 mdp_intr_cb[index].func = fnc_ptr;
1025 mdp_intr_cb[index].arg = arg;
1026
1027 return 0;
1028}
1029
/*
 * mdss_mdp_intr_done() - invoke the registered callback for an irq slot
 * @index: index into mdp_intr_cb (same ordering as mdp_irq_map)
 *
 * Snapshots the callback/arg pair under mdss_mdp_intr_lock, then calls
 * the handler outside the lock.
 */
static inline void mdss_mdp_intr_done(int index)
{
	void (*fnc)(void *);
	void *arg;

	spin_lock(&mdss_mdp_intr_lock);
	fnc = mdp_intr_cb[index].func;
	arg = mdp_intr_cb[index].arg;
	spin_unlock(&mdss_mdp_intr_lock);
	if (fnc)
		fnc(arg);
}
1042
/*
 * mdss_mdp_isr() - MDP block interrupt service routine
 * @irq: interrupt number
 * @ptr: mdss_data_type registered with the MDSS dispatcher
 *
 * For each interrupt register set: read the status, ack every pending
 * bit, then dispatch the bits that are both pending and enabled to the
 * callbacks in mdp_intr_cb. Register set 0 additionally feeds MISR CRC
 * collection. Histogram and video-interface interrupts are serviced at
 * the end. Bails out immediately when MDP clocks are off, since the
 * registers are inaccessible then.
 */
irqreturn_t mdss_mdp_isr(int irq, void *ptr)
{
	struct mdss_data_type *mdata = ptr;
	u32 isr, mask, hist_isr, hist_mask;
	int i, j;

	if (!mdata->clk_ena)
		return IRQ_HANDLED;

	for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) {
		struct mdss_mdp_intr_reg reg = mdp_intr_reg[i];

		isr = readl_relaxed(mdata->mdp_base + reg.status_off);
		if (isr == 0)
			continue;

		mask = readl_relaxed(mdata->mdp_base + reg.en_off);
		/* ack everything that is pending, enabled or not */
		writel_relaxed(isr, mdata->mdp_base + reg.clr_off);

		pr_debug("%s: reg:%d isr=%x mask=%x\n",
				__func__, i+1, isr, mask);

		isr &= mask;
		if (isr == 0)
			continue;

		for (j = 0; j < ARRAY_SIZE(mdp_irq_map); j++)
			if (mdp_irq_map[j].reg_idx == i &&
					(isr & mdp_irq_map[j].irq_mask))
				mdss_mdp_intr_done(j);
		/* MISR CRC collection is driven off register set 0 only */
		if (!i) {
			if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
					false);

			if (isr & MDSS_MDP_INTR_PING_PONG_1_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
					false);

			if (isr & MDSS_MDP_INTR_INTF_0_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_EDP,
					true);

			if (isr & MDSS_MDP_INTR_INTF_1_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
					true);

			if (isr & MDSS_MDP_INTR_INTF_2_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
					true);

			if (isr & MDSS_MDP_INTR_INTF_3_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_HDMI,
					true);

			if (isr & MDSS_MDP_INTR_WB_0_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);

			if (isr & MDSS_MDP_INTR_WB_1_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);

			if (isr & MDSS_MDP_INTR_WB_2_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);
		}
	}

	hist_isr = readl_relaxed(mdata->mdp_base +
			MDSS_MDP_REG_HIST_INTR_STATUS);
	if (hist_isr != 0) {
		hist_mask = readl_relaxed(mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_EN);
		writel_relaxed(hist_isr, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_CLEAR);
		hist_isr &= hist_mask;
		if (hist_isr != 0)
			mdss_mdp_hist_intr_done(hist_isr);
	}

	mdss_mdp_video_isr(mdata->video_intf, mdata->nintf);
	return IRQ_HANDLED;
}
1127
1128static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
1129{
1130 int ret = -ENODEV;
1131 struct clk *clk = mdss_mdp_get_clk(clk_idx);
1132
1133 if (clk) {
1134 pr_debug("clk=%d en=%d\n", clk_idx, enable);
1135 if (enable) {
1136 if (clk_idx == MDSS_CLK_MDP_VSYNC)
1137 clk_set_rate(clk, 19200000);
1138 ret = clk_prepare_enable(clk);
1139 } else {
1140 clk_disable_unprepare(clk);
1141 ret = 0;
1142 }
1143 }
1144 return ret;
1145}
1146
1147int mdss_mdp_vsync_clk_enable(int enable, bool locked)
1148{
1149 int ret = 0;
1150
1151 pr_debug("clk enable=%d\n", enable);
1152
1153 if (!locked)
1154 mutex_lock(&mdp_clk_lock);
1155
1156 if (mdss_res->vsync_ena != enable) {
1157 mdss_res->vsync_ena = enable;
1158 ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
1159 }
1160
1161 if (!locked)
1162 mutex_unlock(&mdp_clk_lock);
1163 return ret;
1164}
1165
/*
 * mdss_mdp_set_clk_rate() - program the MDP core clock rate.
 * @rate: requested rate in Hz.
 *
 * The request is raised to at least the tunable minimum
 * (perf_tune.min_mdp_clk), rounded through the clock framework when it
 * is below the platform maximum, and capped at max_mdp_clk_rate
 * otherwise. The rate is only written when it differs from the current
 * rate. Must not be called from atomic context (takes mdp_clk_lock).
 */
void mdss_mdp_set_clk_rate(unsigned long rate)
{
	struct mdss_data_type *mdata = mdss_res;
	unsigned long clk_rate;
	struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
	unsigned long min_clk_rate;

	/* never go below the perf-tuning floor */
	min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);

	if (clk) {
		mutex_lock(&mdp_clk_lock);
		if (min_clk_rate < mdata->max_mdp_clk_rate)
			clk_rate = clk_round_rate(clk, min_clk_rate);
		else
			clk_rate = mdata->max_mdp_clk_rate;
		/* clk_round_rate() reports errors as negative errnos */
		if (IS_ERR_VALUE(clk_rate)) {
			pr_err("unable to round rate err=%ld\n", clk_rate);
		} else if (clk_rate != clk_get_rate(clk)) {
			if (IS_ERR_VALUE((unsigned long)
					clk_set_rate(clk, clk_rate)))
				pr_err("clk_set_rate failed\n");
			else
				pr_debug("mdp clk rate=%lu\n", clk_rate);
		}
		mutex_unlock(&mdp_clk_lock);
	} else {
		pr_err("mdp src clk not setup properly\n");
	}
}
1195
1196unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked)
1197{
1198 unsigned long clk_rate = 0;
1199 struct clk *clk = mdss_mdp_get_clk(clk_idx);
1200
1201 if (clk) {
1202 if (!locked)
1203 mutex_lock(&mdp_clk_lock);
1204
1205 clk_rate = clk_get_rate(clk);
1206
1207 if (!locked)
1208 mutex_unlock(&mdp_clk_lock);
1209 }
1210
1211 return clk_rate;
1212}
1213
1214/**
1215 * mdss_bus_rt_bw_vote() -- place bus bandwidth request
1216 * @enable: value of enable or disable
1217 *
1218 * hw_rt table has two entries, 0 and Min Vote (1Mhz)
1219 * while attaching SMMU and for few TZ operations which
1220 * happen at very early stage, we will request Min Vote
1221 * thru this handle.
1222 *
1223 */
1224static int mdss_bus_rt_bw_vote(bool enable)
1225{
1226 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1227 int rc = 0;
1228 bool changed = false;
1229
1230 if (!mdata->hw_rt_bus_hdl || mdata->handoff_pending)
1231 return 0;
1232
1233 if (enable) {
1234 if (mdata->hw_rt_bus_ref_cnt == 0)
1235 changed = true;
1236 mdata->hw_rt_bus_ref_cnt++;
1237 } else {
1238 if (mdata->hw_rt_bus_ref_cnt != 0) {
1239 mdata->hw_rt_bus_ref_cnt--;
1240 if (mdata->hw_rt_bus_ref_cnt == 0)
1241 changed = true;
1242 } else {
1243 pr_warn("%s: bus bw votes are not balanced\n",
1244 __func__);
1245 }
1246 }
1247
1248 pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
1249 __builtin_return_address(0), current->group_leader->comm,
1250 mdata->hw_rt_bus_ref_cnt, changed, enable);
1251
1252 if (changed) {
1253 rc = msm_bus_scale_client_update_request(mdata->hw_rt_bus_hdl,
1254 enable ? 1 : 0);
1255 if (rc)
1256 pr_err("%s: Bus bandwidth vote failed\n", __func__);
1257 }
1258
1259 return rc;
1260}
1261
1262/**
1263 * __mdss_mdp_reg_access_clk_enable - Enable minimum MDSS clocks required
1264 * for register access
1265 */
1266static inline void __mdss_mdp_reg_access_clk_enable(
1267 struct mdss_data_type *mdata, bool enable)
1268{
1269 if (enable) {
1270 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1271 VOTE_INDEX_LOW);
1272 mdss_bus_rt_bw_vote(true);
1273 mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
1274 mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
1275 mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
1276 } else {
1277 mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
1278 mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
1279 mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
1280 mdss_bus_rt_bw_vote(false);
1281 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1282 VOTE_INDEX_DISABLE);
1283 }
1284}
1285
/*
 * __mdss_mdp_vbif_halt() - request and wait for an AXI halt on one VBIF.
 * @mdata:  pointer to the global mdss data structure.
 * @is_nrt: true to halt the non-real-time VBIF, false for the RT VBIF.
 *
 * Forces the VBIF clock on, requests the halt, and polls the halt-ack
 * bit for up to AXI_HALT_TIMEOUT_US microseconds.
 *
 * Return: 0 on success (or when the port does not exist on this
 * target), -ETIMEDOUT if the halt was never acknowledged.
 */
int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
{
	int rc = 0;
	void __iomem *base;
	u32 halt_ack_mask = BIT(0), status;

	/* if not real time vbif */
	if (is_nrt)
		base = mdata->vbif_nrt_io.base;
	else
		base = mdata->vbif_io.base;

	if (!base) {
		/* some targets might not have a nrt port */
		goto vbif_done;
	}

	/* force vbif clock on */
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 1, is_nrt);

	/* request halt */
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 1, is_nrt);

	/* poll for the halt acknowledge bit */
	rc = readl_poll_timeout(base +
			MMSS_VBIF_AXI_HALT_CTRL1, status, (status &
			halt_ack_mask),
			1000, AXI_HALT_TIMEOUT_US);
	if (rc == -ETIMEDOUT) {
		pr_err("VBIF axi is not halting. TIMEDOUT.\n");
		goto vbif_done;
	}

	pr_debug("VBIF axi is halted\n");

vbif_done:
	return rc;
}
1323
1324/**
1325 * mdss_mdp_vbif_axi_halt() - Halt MDSS AXI ports
1326 * @mdata: pointer to the global mdss data structure.
1327 *
1328 * This function can be called during deep suspend, display off or for
1329 * debugging purposes. On success it should be assumed that AXI ports connected
1330 * to RT VBIF are in idle state and would not fetch any more data.
1331 */
1332static void mdss_mdp_vbif_axi_halt(struct mdss_data_type *mdata)
1333{
1334 __mdss_mdp_reg_access_clk_enable(mdata, true);
1335
1336 /* real time ports */
1337 __mdss_mdp_vbif_halt(mdata, false);
1338 /* non-real time ports */
1339 __mdss_mdp_vbif_halt(mdata, true);
1340
1341 __mdss_mdp_reg_access_clk_enable(mdata, false);
1342}
1343
/*
 * mdss_iommu_ctrl() - reference-counted control of the MDSS SMMU attach.
 * @enable: non-zero to take a reference (attach on 0 -> 1), zero to
 *          drop one (detach on 1 -> 0).
 *
 * The attach is deferred while continuous-splash handoff is pending,
 * since the hardware may still be scanning out physical addresses.
 *
 * Return: a negative errno on attach/detach failure, otherwise the
 * updated reference count.
 */
int mdss_iommu_ctrl(int enable)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int rc = 0;

	mutex_lock(&mdp_iommu_ref_cnt_lock);
	pr_debug("%pS: enable:%d ref_cnt:%d attach:%d hoff:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached, mdata->handoff_pending);

	if (enable) {
		/*
		 * delay iommu attach until continuous splash screen has
		 * finished handoff, as it may still be working with phys addr
		 */
		if (!mdata->iommu_attached && !mdata->handoff_pending) {
			/* min bus vote must be in place before attach */
			mdss_bus_rt_bw_vote(true);
			rc = mdss_smmu_attach(mdata);
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			/* last reference gone: detach and drop the bus vote */
			if (mdata->iommu_ref_cnt == 0) {
				rc = mdss_smmu_detach(mdata);
				mdss_bus_rt_bw_vote(false);
			}
		} else {
			pr_err("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&mdp_iommu_ref_cnt_lock);

	if (IS_ERR_VALUE((unsigned long)rc))
		return rc;
	else
		return mdata->iommu_ref_cnt;
}
1382
1383static void mdss_mdp_memory_retention_enter(void)
1384{
1385 struct clk *mdss_mdp_clk = NULL;
1386 struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
1387
1388 if (mdp_vote_clk) {
1389 mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
1390 if (mdss_mdp_clk) {
1391 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
1392 clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
1393 clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
1394 }
1395 }
1396}
1397
1398static void mdss_mdp_memory_retention_exit(void)
1399{
1400 struct clk *mdss_mdp_clk = NULL;
1401 struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
1402
1403 if (mdp_vote_clk) {
1404 mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
1405 if (mdss_mdp_clk) {
1406 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
1407 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
1408 clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
1409 }
1410 }
1411}
1412
1413/**
1414 * mdss_mdp_idle_pc_restore() - Restore MDSS settings when exiting idle pc
1415 *
1416 * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
1417 * mode displays, referred to as MDSS idle power collapse. Upon subsequent
1418 * frame update, MDSS GDSC needs to turned back on and hw state needs to be
1419 * restored.
1420 */
1421static int mdss_mdp_idle_pc_restore(void)
1422{
1423 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1424 int rc = 0;
1425
1426 mutex_lock(&mdp_fs_idle_pc_lock);
1427 if (!mdata->idle_pc) {
1428 pr_debug("no idle pc, no need to restore\n");
1429 goto end;
1430 }
1431
1432 pr_debug("called from %pS\n", __builtin_return_address(0));
1433 rc = mdss_iommu_ctrl(1);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301434 if (IS_ERR_VALUE((unsigned long)rc)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301435 pr_err("mdss iommu attach failed rc=%d\n", rc);
1436 goto end;
1437 }
1438 mdss_hw_init(mdata);
1439 mdss_iommu_ctrl(0);
1440
1441 /**
1442 * sleep 10 microseconds to make sure AD auto-reinitialization
1443 * is done
1444 */
1445 udelay(10);
1446 mdss_mdp_memory_retention_exit();
1447
1448 mdss_mdp_ctl_restore(true);
1449 mdata->idle_pc = false;
1450
1451end:
1452 mutex_unlock(&mdp_fs_idle_pc_lock);
1453 return rc;
1454}
1455
1456/**
1457 * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
1458 * @enable: value of enable or disable
1459 *
1460 * Function place bus bandwidth request to allocate saved bandwidth
1461 * if enabled or free bus bandwidth allocation if disabled.
1462 * Bus bandwidth is required by mdp.For dsi, it only requires to send
1463 * dcs coammnd. It returns error if bandwidth request fails.
1464 */
1465void mdss_bus_bandwidth_ctrl(int enable)
1466{
1467 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1468 int changed = 0;
1469
1470 mutex_lock(&mdata->bus_lock);
1471 if (enable) {
1472 if (mdata->bus_ref_cnt == 0)
1473 changed++;
1474 mdata->bus_ref_cnt++;
1475 } else {
1476 if (mdata->bus_ref_cnt) {
1477 mdata->bus_ref_cnt--;
1478 if (mdata->bus_ref_cnt == 0)
1479 changed++;
1480 } else {
1481 pr_err("Can not be turned off\n");
1482 }
1483 }
1484
1485 pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
1486 __builtin_return_address(0), current->group_leader->comm,
1487 mdata->bus_ref_cnt, changed, enable);
1488
1489 if (changed) {
1490 MDSS_XLOG(mdata->bus_ref_cnt, enable);
1491
1492 if (!enable) {
1493 if (!mdata->handoff_pending) {
1494 msm_bus_scale_client_update_request(
1495 mdata->bus_hdl, 0);
1496 mdata->ao_bw_uc_idx = 0;
1497 }
1498 pm_runtime_mark_last_busy(&mdata->pdev->dev);
1499 pm_runtime_put_autosuspend(&mdata->pdev->dev);
1500 } else {
1501 pm_runtime_get_sync(&mdata->pdev->dev);
1502 msm_bus_scale_client_update_request(
1503 mdata->bus_hdl, mdata->curr_bw_uc_idx);
1504 }
1505 }
1506
1507 mutex_unlock(&mdata->bus_lock);
1508}
1509EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);
1510
1511void mdss_mdp_clk_ctrl(int enable)
1512{
1513 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1514 static int mdp_clk_cnt;
1515 unsigned long flags;
1516 int changed = 0;
1517 int rc = 0;
1518
1519 mutex_lock(&mdp_clk_lock);
1520 if (enable) {
1521 if (mdp_clk_cnt == 0)
1522 changed++;
1523 mdp_clk_cnt++;
1524 } else {
1525 if (mdp_clk_cnt) {
1526 mdp_clk_cnt--;
1527 if (mdp_clk_cnt == 0)
1528 changed++;
1529 } else {
1530 pr_err("Can not be turned off\n");
1531 }
1532 }
1533
1534 if (changed)
1535 MDSS_XLOG(mdp_clk_cnt, enable, current->pid);
1536
1537 pr_debug("%pS: task:%s clk_cnt=%d changed=%d enable=%d\n",
1538 __builtin_return_address(0), current->group_leader->comm,
1539 mdata->bus_ref_cnt, changed, enable);
1540
1541 if (changed) {
1542 if (enable) {
1543 pm_runtime_get_sync(&mdata->pdev->dev);
1544
1545 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1546 VOTE_INDEX_LOW);
1547
1548 rc = mdss_iommu_ctrl(1);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301549 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301550 pr_err("IOMMU attach failed\n");
1551
1552 /* Active+Sleep */
1553 msm_bus_scale_client_update_context(mdata->bus_hdl,
1554 false, mdata->curr_bw_uc_idx);
1555 }
1556
1557 spin_lock_irqsave(&mdp_lock, flags);
1558 mdata->clk_ena = enable;
1559 spin_unlock_irqrestore(&mdp_lock, flags);
1560
1561 mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
1562 mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
1563 mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
1564 mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
1565 if (mdata->vsync_ena)
1566 mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
1567
1568 if (!enable) {
1569 /* release iommu control */
1570 mdss_iommu_ctrl(0);
1571
1572 /* Active-Only */
1573 msm_bus_scale_client_update_context(mdata->bus_hdl,
1574 true, mdata->ao_bw_uc_idx);
1575
1576 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1577 VOTE_INDEX_DISABLE);
1578
1579 pm_runtime_mark_last_busy(&mdata->pdev->dev);
1580 pm_runtime_put_autosuspend(&mdata->pdev->dev);
1581 }
1582 }
1583
1584 if (enable && changed)
1585 mdss_mdp_idle_pc_restore();
1586
1587 mutex_unlock(&mdp_clk_lock);
1588}
1589
1590static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
1591 char *clk_name, int clk_idx)
1592{
1593 struct clk *tmp;
1594
1595 if (clk_idx >= MDSS_MAX_CLK) {
1596 pr_err("invalid clk index %d\n", clk_idx);
1597 return -EINVAL;
1598 }
1599
1600 tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
1601 if (IS_ERR(tmp)) {
1602 pr_err("unable to get clk: %s\n", clk_name);
1603 return PTR_ERR(tmp);
1604 }
1605
1606 mdata->mdp_clk[clk_idx] = tmp;
1607 return 0;
1608}
1609
1610#define SEC_DEVICE_MDSS 1
1611
1612static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
1613{
1614 int ret, scm_ret = 0;
1615
1616 if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
1617 return;
1618
1619 pr_debug("restoring mdss secure config\n");
1620
1621 __mdss_mdp_reg_access_clk_enable(mdata, true);
1622
1623 ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
1624 if (ret || scm_ret)
1625 pr_warn("scm_restore_sec_cfg failed %d %d\n",
1626 ret, scm_ret);
1627
1628 __mdss_mdp_reg_access_clk_enable(mdata, false);
1629}
1630
1631static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
1632 unsigned long event, void *data)
1633{
1634 struct mdss_data_type *mdata;
1635
1636 mdata = container_of(self, struct mdss_data_type, gdsc_cb);
1637
1638 if (event & REGULATOR_EVENT_ENABLE) {
1639 /*
1640 * As SMMU in low tier targets is not power collapsible,
1641 * hence we don't need to restore sec configuration.
1642 */
1643 if (!mdss_mdp_req_init_restore_cfg(mdata))
1644 __mdss_restore_sec_cfg(mdata);
1645 } else if (event & REGULATOR_EVENT_PRE_DISABLE) {
1646 pr_debug("mdss gdsc is getting disabled\n");
1647 /* halt the vbif transactions */
1648 mdss_mdp_vbif_axi_halt(mdata);
1649 }
1650
1651 return NOTIFY_OK;
1652}
1653
/*
 * mdss_mdp_irq_clk_setup() - probe-time setup of the MDP interrupt,
 * regulators, bus client, and clocks.
 * @mdata: pointer to the global mdss data structure.
 *
 * Reads the max clock rate from DT, requests (and initially disables)
 * the MDSS irq, acquires the gdsc/venus/cx regulators, registers a
 * GDSC notifier, creates the register bus client, registers all MDSS
 * clocks, and programs the core clock to its maximum supported rate.
 * All resources are devm-managed, so error paths need no manual
 * cleanup.
 *
 * Return: 0 on success, otherwise a negative errno.
 */
static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
{
	int ret;

	ret = of_property_read_u32(mdata->pdev->dev.of_node,
			"qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
	if (ret) {
		pr_err("failed to get max mdp clock rate\n");
		return ret;
	}

	pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);

	ret = devm_request_irq(&mdata->pdev->dev, mdss_mdp_hw.irq_info->irq,
			mdss_irq_handler, 0, "MDSS", mdata);
	if (ret) {
		pr_err("mdp request_irq() failed!\n");
		return ret;
	}
	/* keep the irq off until the hw is fully initialized */
	disable_irq(mdss_mdp_hw.irq_info->irq);

	mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
	if (IS_ERR_OR_NULL(mdata->fs)) {
		mdata->fs = NULL;
		pr_err("unable to get gdsc regulator\n");
		return -EINVAL;
	}

	/* venus gdsc is optional on some targets */
	mdata->venus = devm_regulator_get_optional(&mdata->pdev->dev,
		"gdsc-venus");
	if (IS_ERR_OR_NULL(mdata->venus)) {
		mdata->venus = NULL;
		pr_debug("unable to get venus gdsc regulator\n");
	}

	mdata->fs_ena = false;

	mdata->gdsc_cb.notifier_call = mdss_mdp_gdsc_notifier_call;
	mdata->gdsc_cb.priority = 5;
	if (regulator_register_notifier(mdata->fs, &(mdata->gdsc_cb)))
		pr_warn("GDSC notification registration failed!\n");
	else
		mdata->regulator_notif_register = true;

	/* CX rail is optional as well */
	mdata->vdd_cx = devm_regulator_get_optional(&mdata->pdev->dev,
		"vdd-cx");
	if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
		pr_debug("unable to get CX reg. rc=%d\n",
					PTR_RET(mdata->vdd_cx));
		mdata->vdd_cx = NULL;
	}

	mdata->reg_bus_clt = mdss_reg_bus_vote_client_create("mdp\0");
	if (IS_ERR(mdata->reg_bus_clt)) {
		pr_err("bus client register failed\n");
		return PTR_ERR(mdata->reg_bus_clt);
	}

	/* AXI, AHB and core clocks are mandatory */
	if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
	    mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
	    mdss_mdp_irq_clk_register(mdata, "core_clk",
				      MDSS_CLK_MDP_CORE))
		return -EINVAL;

	/* lut_clk is not present on all MDSS revisions */
	mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT);

	/* vsync_clk is optional for non-smart panels */
	mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC);

	/* Setting the default clock rate to the max supported.*/
	mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
	pr_debug("mdp clk rate=%ld\n",
		mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));

	return 0;
}
1731
1732static void mdss_debug_enable_clock(int on)
1733{
1734 if (on)
1735 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
1736 else
1737 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
1738}
1739
1740static int mdss_mdp_debug_init(struct platform_device *pdev,
1741 struct mdss_data_type *mdata)
1742{
1743 int rc;
1744 struct mdss_debug_base *dbg_blk;
1745
1746 mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock;
1747
1748 rc = mdss_debugfs_init(mdata);
1749 if (rc)
1750 return rc;
1751
1752 rc = mdss_mdp_debugfs_init(mdata);
1753 if (rc) {
1754 mdss_debugfs_remove(mdata);
1755 return rc;
1756 }
1757
1758 mdss_debug_register_io("mdp", &mdata->mdss_io, &dbg_blk);
1759 mdss_debug_register_dump_range(pdev, dbg_blk, "qcom,regs-dump-mdp",
1760 "qcom,regs-dump-names-mdp", "qcom,regs-dump-xin-id-mdp");
1761
1762 if (mdata->vbif_io.base)
1763 mdss_debug_register_io("vbif", &mdata->vbif_io, NULL);
1764 if (mdata->vbif_nrt_io.base)
1765 mdss_debug_register_io("vbif_nrt", &mdata->vbif_nrt_io, NULL);
1766
1767 return 0;
1768}
1769
1770static u32 mdss_get_props(void)
1771{
1772 u32 props = 0;
1773 void __iomem *props_base = ioremap(0xFC4B8114, 4);
1774
1775 if (props_base) {
1776 props = readl_relaxed(props_base);
1777 iounmap(props_base);
1778 }
1779 return props;
1780}
1781
1782void mdss_mdp_init_default_prefill_factors(struct mdss_data_type *mdata)
1783{
1784 mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor = 8;
1785 mdata->prefill_data.prefill_factors.fmt_mt_factor = 4;
1786 mdata->prefill_data.prefill_factors.fmt_linear_factor = 1;
1787 mdata->prefill_data.prefill_factors.scale_factor = 1;
1788 mdata->prefill_data.prefill_factors.xtra_ff_factor = 2;
1789
1790 if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
1791 mdata->prefill_data.ts_threshold = 25;
1792 mdata->prefill_data.ts_end = 8;
1793 mdata->prefill_data.ts_rate.numer = 1;
1794 mdata->prefill_data.ts_rate.denom = 4;
1795 mdata->prefill_data.ts_overhead = 2;
1796 }
1797}
1798
/*
 * mdss_mdp_hw_rev_caps_init() - fill in per-hardware-revision
 * capabilities, QoS flags and quirks.
 * @mdata: pointer to the global mdss data structure.
 *
 * Defaults are set first and then overridden per mdp_rev in the switch
 * below. Several cases intentionally fall through to share settings
 * with the next revision.
 */
static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
{

	mdata->per_pipe_ib_factor.numer = 0;
	mdata->per_pipe_ib_factor.denom = 0;
	mdata->apply_post_scale_bytes = true;
	mdata->hflip_buffer_reused = true;
	/* prevent disable of prefill calculations */
	mdata->min_prefill_lines = 0xffff;
	/* clock gating feature is disabled by default */
	mdata->enable_gate = false;
	mdata->pixel_ram_size = 0;
	mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT;

	mdss_mdp_hw_rev_debug_caps_init(mdata);

	switch (mdata->mdp_rev) {
	case MDSS_MDP_HW_REV_107:
		mdss_set_quirk(mdata, MDSS_QUIRK_ROTCDP);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_1:
		mdss_mdp_format_flag_removal(invalid_mdp107_wb_output_fmts,
			ARRAY_SIZE(invalid_mdp107_wb_output_fmts),
			VALID_MDP_WB_INTF_FORMAT);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_2:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 21;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED);
		break;
	case MDSS_MDP_HW_REV_105:
	case MDSS_MDP_HW_REV_109:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		break;
	case MDSS_MDP_HW_REV_110:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdata->min_prefill_lines = 12;
		mdata->props = mdss_get_props();
		break;
	case MDSS_MDP_HW_REV_112:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
		mdata->min_prefill_lines = 12;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		break;
	case MDSS_MDP_HW_REV_114:
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		/* fall-through */
	case MDSS_MDP_HW_REV_116:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 40 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_115:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = false;
		mdata->pixel_ram_size = 16 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_MIXER_1_FOR_WB, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_300:
	case MDSS_MDP_HW_REV_301:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 384;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 25;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;

		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
		mdata->has_wb_ubwc = true;
		set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
		break;
	default:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
	}

	/* quirks common to ranges of older revisions */
	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
		mdss_set_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG);

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 ||
			mdata->mdp_rev == MDSS_MDP_HW_REV_200)
		mdss_set_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN);
}
1961
1962static void mdss_hw_rev_init(struct mdss_data_type *mdata)
1963{
1964 if (mdata->mdp_rev)
1965 return;
1966
1967 mdata->mdp_rev = MDSS_REG_READ(mdata, MDSS_REG_HW_VERSION);
1968 mdss_mdp_hw_rev_caps_init(mdata);
1969}
1970
1971/**
1972 * mdss_hw_init() - Initialize MDSS target specific register settings
1973 * @mdata: MDP private data
1974 *
1975 * Initialize basic MDSS hardware settings based on the board specific
1976 * parameters. This function does not explicitly turn on the MDP clocks
1977 * and so it must be called with the MDP clocks already enabled.
1978 */
1979void mdss_hw_init(struct mdss_data_type *mdata)
1980{
1981 struct mdss_mdp_pipe *vig;
1982
1983 mdss_hw_rev_init(mdata);
1984
1985 /* Disable hw underrun recovery only for older mdp reversions. */
1986 if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
1987 writel_relaxed(0x0, mdata->mdp_base +
1988 MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL);
1989
1990 if (mdata->hw_settings) {
1991 struct mdss_hw_settings *hws = mdata->hw_settings;
1992
1993 while (hws->reg) {
1994 writel_relaxed(hws->val, hws->reg);
1995 hws++;
1996 }
1997 }
1998
1999 vig = mdata->vig_pipes;
2000
2001 mdata->nmax_concurrent_ad_hw =
2002 (mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
2003
2004 pr_debug("MDP hw init done\n");
2005}
2006
/*
 * mdss_mdp_res_init() - one-time initialization of MDP software
 * resources: interrupt state, clocks, histogram bookkeeping, and the
 * ion client.
 * @mdata: pointer to the global mdss data structure.
 *
 * Return: 0 on success; -EPERM if already initialized, otherwise the
 * irq/clk setup error. (Note: the u32 return type carries negative
 * errnos cast to unsigned; callers test for non-zero.)
 */
static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
{
	u32 rc = 0;

	/* guard against double initialization */
	if (mdata->res_init) {
		pr_err("mdss resources already initialized\n");
		return -EPERM;
	}

	mdata->res_init = true;
	mdata->clk_ena = false;
	mdss_mdp_hw.irq_info->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
	mdss_mdp_hw.irq_info->irq_ena = false;

	rc = mdss_mdp_irq_clk_setup(mdata);
	if (rc)
		return rc;

	mdata->hist_intr.req = 0;
	mdata->hist_intr.curr = 0;
	mdata->hist_intr.state = 0;
	spin_lock_init(&mdata->hist_intr.lock);

	/* ion client is optional; continue without it on failure */
	mdata->iclient = msm_ion_client_create(mdata->pdev->name);
	if (IS_ERR_OR_NULL(mdata->iclient)) {
		pr_err("msm_ion_client_create() return error (%pK)\n",
				mdata->iclient);
		mdata->iclient = NULL;
	}

	return rc;
}
2039
2040static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
2041 struct device *dev)
2042{
2043 int ret;
2044 struct device_node *node;
2045 u32 prop_val;
2046
2047 if (!dev)
2048 return -EPERM;
2049
2050 node = of_get_child_by_name(dev->of_node, "qcom,mdss-scaler-offsets");
2051 if (!node)
2052 return 0;
2053
2054 if (mdata->scaler_off)
2055 return -EFAULT;
2056
2057 mdata->scaler_off = devm_kzalloc(&mdata->pdev->dev,
2058 sizeof(*mdata->scaler_off), GFP_KERNEL);
2059 if (!mdata->scaler_off)
2060 return -ENOMEM;
2061
2062 ret = of_property_read_u32(node,
2063 "qcom,mdss-vig-scaler-off",
2064 &prop_val);
2065 if (ret) {
2066 pr_err("read property %s failed ret %d\n",
2067 "qcom,mdss-vig-scaler-off", ret);
2068 return -EINVAL;
2069 }
2070 mdata->scaler_off->vig_scaler_off = prop_val;
2071 ret = of_property_read_u32(node,
2072 "qcom,mdss-vig-scaler-lut-off",
2073 &prop_val);
2074 if (ret) {
2075 pr_err("read property %s failed ret %d\n",
2076 "qcom,mdss-vig-scaler-lut-off", ret);
2077 return -EINVAL;
2078 }
2079 mdata->scaler_off->vig_scaler_lut_off = prop_val;
2080 mdata->scaler_off->has_dest_scaler =
2081 of_property_read_bool(mdata->pdev->dev.of_node,
2082 "qcom,mdss-has-dest-scaler");
2083 if (mdata->scaler_off->has_dest_scaler) {
2084 ret = of_property_read_u32(node,
2085 "qcom,mdss-dest-block-off",
2086 &prop_val);
2087 if (ret) {
2088 pr_err("read property %s failed ret %d\n",
2089 "qcom,mdss-dest-block-off", ret);
2090 return -EINVAL;
2091 }
2092 mdata->scaler_off->dest_base = mdata->mdss_io.base +
2093 prop_val;
2094 mdata->scaler_off->ndest_scalers =
2095 mdss_mdp_parse_dt_prop_len(mdata->pdev,
2096 "qcom,mdss-dest-scalers-off");
2097 mdata->scaler_off->dest_scaler_off =
2098 devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
2099 mdata->scaler_off->ndest_scalers,
2100 GFP_KERNEL);
2101 if (!mdata->scaler_off->dest_scaler_off) {
2102 kfree(mdata->scaler_off->dest_scaler_off);
2103 return -ENOMEM;
2104 }
2105 ret = mdss_mdp_parse_dt_handler(mdata->pdev,
2106 "qcom,mdss-dest-scaler-off",
2107 mdata->scaler_off->dest_scaler_off,
2108 mdata->scaler_off->ndest_scalers);
2109 if (ret)
2110 return -EINVAL;
2111 mdata->scaler_off->dest_scaler_lut_off =
2112 devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
2113 mdata->scaler_off->ndest_scalers,
2114 GFP_KERNEL);
2115 if (!mdata->scaler_off->dest_scaler_lut_off) {
2116 kfree(mdata->scaler_off->dest_scaler_lut_off);
2117 return -ENOMEM;
2118 }
2119 ret = mdss_mdp_parse_dt_handler(mdata->pdev,
2120 "qcom,mdss-dest-scalers-lut-off",
2121 mdata->scaler_off->dest_scaler_lut_off,
2122 mdata->scaler_off->ndest_scalers);
2123 if (ret)
2124 return -EINVAL;
2125 }
2126
2127 return 0;
2128}
2129
2130/**
2131 * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen
2132 * @on: 1 to start handoff, 0 to complete the handoff after first frame update
2133 *
2134 * MDSS Clocks and GDSC are already on during continuous splash screen, but
2135 * increasing ref count will keep clocks from being turned off until handoff
2136 * has properly happened after frame update.
2137 */
2138void mdss_mdp_footswitch_ctrl_splash(int on)
2139{
2140 int ret;
2141 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2142
2143 if (mdata != NULL) {
2144 if (on) {
2145 mdata->handoff_pending = true;
2146 pr_debug("Enable MDP FS for splash.\n");
2147 if (mdata->venus) {
2148 ret = regulator_enable(mdata->venus);
2149 if (ret)
2150 pr_err("venus failed to enable\n");
2151 }
2152
2153 ret = regulator_enable(mdata->fs);
2154 if (ret)
2155 pr_err("Footswitch failed to enable\n");
2156
2157 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2158 mdss_bus_bandwidth_ctrl(true);
2159 } else {
2160 pr_debug("Disable MDP FS for splash.\n");
2161 mdss_bus_bandwidth_ctrl(false);
2162 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2163 regulator_disable(mdata->fs);
2164 if (mdata->venus)
2165 regulator_disable(mdata->venus);
2166 mdata->handoff_pending = false;
2167 }
2168 } else {
2169 pr_warn("mdss mdata not initialized\n");
2170 }
2171}
2172
2173static int mdss_mdp_get_pan_intf(const char *pan_intf)
2174{
2175 int i, rc = MDSS_PANEL_INTF_INVALID;
2176
2177 if (!pan_intf)
2178 return rc;
2179
2180 for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
2181 if (!strcmp(pan_intf, pan_types[i].name)) {
2182 rc = pan_types[i].type;
2183 break;
2184 }
2185 }
2186 return rc;
2187}
2188
2189static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
2190{
2191 char *t = NULL;
2192 char pan_intf_str[MDSS_MAX_PANEL_LEN];
2193 int rc, i, panel_len;
2194 char pan_name[MDSS_MAX_PANEL_LEN] = {'\0'};
2195
2196 if (!pan_cfg)
2197 return -EINVAL;
2198
2199 if (mdss_mdp_panel[0] == '0') {
2200 pr_debug("panel name is not set\n");
2201 pan_cfg->lk_cfg = false;
2202 pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
2203 return -EINVAL;
2204 } else if (mdss_mdp_panel[0] == '1') {
2205 pan_cfg->lk_cfg = true;
2206 } else {
2207 /* read from dt */
2208 pan_cfg->lk_cfg = true;
2209 pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
2210 return -EINVAL;
2211 }
2212
2213 /* skip lk cfg and delimiter; ex: "1:" */
2214 strlcpy(pan_name, &mdss_mdp_panel[2], MDSS_MAX_PANEL_LEN);
2215 t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
2216 if (!t) {
2217 pr_err("pan_name=[%s] invalid\n", pan_name);
2218 pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
2219 return -EINVAL;
2220 }
2221
2222 for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
2223 pan_intf_str[i] = *(pan_name + i);
2224 pan_intf_str[i] = 0;
2225 pr_debug("%d panel intf %s\n", __LINE__, pan_intf_str);
2226 /* point to the start of panel name */
2227 t = t + 1;
2228 strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
2229 pr_debug("%d: t=[%s] panel name=[%s]\n", __LINE__,
2230 t, pan_cfg->arg_cfg);
2231
2232 panel_len = strlen(pan_cfg->arg_cfg);
2233 if (!panel_len) {
2234 pr_err("Panel name is invalid\n");
2235 pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
2236 return -EINVAL;
2237 }
2238
2239 rc = mdss_mdp_get_pan_intf(pan_intf_str);
2240 pan_cfg->pan_intf = (rc < 0) ? MDSS_PANEL_INTF_INVALID : rc;
2241 return 0;
2242}
2243
2244static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev)
2245{
2246 int rc;
2247 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2248 const char *prim_intf = NULL;
2249
2250 rc = of_property_read_string(pdev->dev.of_node,
2251 "qcom,mdss-pref-prim-intf", &prim_intf);
2252 if (rc)
2253 return -ENODEV;
2254
2255 rc = mdss_mdp_get_pan_intf(prim_intf);
2256 if (rc < 0) {
2257 mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
2258 } else {
2259 mdata->pan_cfg.pan_intf = rc;
2260 rc = 0;
2261 }
2262 return rc;
2263}
2264
2265static int mdss_mdp_get_cmdline_config(struct platform_device *pdev)
2266{
2267 int rc, len = 0;
2268 int *intf_type;
2269 char *panel_name;
2270 struct mdss_panel_cfg *pan_cfg;
2271 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2272
2273 mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
2274 pan_cfg = &mdata->pan_cfg;
2275 panel_name = &pan_cfg->arg_cfg[0];
2276 intf_type = &pan_cfg->pan_intf;
2277
2278 /* reads from dt by default */
2279 pan_cfg->lk_cfg = true;
2280
2281 len = strlen(mdss_mdp_panel);
2282
2283 if (len > 0) {
2284 rc = mdss_mdp_get_pan_cfg(pan_cfg);
2285 if (!rc) {
2286 pan_cfg->init_done = true;
2287 return rc;
2288 }
2289 }
2290
2291 rc = mdss_mdp_parse_dt_pan_intf(pdev);
2292 /* if pref pan intf is not present */
2293 if (rc)
2294 pr_warn("unable to parse device tree for pan intf\n");
2295
2296 pan_cfg->init_done = true;
2297
2298 return 0;
2299}
2300
/*
 * __update_sspp_info() - append one capability line per source pipe
 * @pipe: first pipe of this class; multirect-capable pipes occupy
 *        multirect.max_rects consecutive array entries, hence the stride
 *        at the bottom of the loop
 * @pipe_cnt: number of physical pipes of this class
 * @type: class name emitted in the "pipe_type" field
 * @buf: sysfs output buffer (PAGE_SIZE bound)
 * @cnt: running output length in bytes, updated in place
 */
static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
	int pipe_cnt, char *type, char *buf, int *cnt)
{
	int i;
	int j;
	size_t len = PAGE_SIZE;
	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

/* bounded append into buf; scnprintf never writes past len */
#define SPRINT(fmt, ...) \
		(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))

	for (i = 0; i < pipe_cnt && pipe; i++) {
		SPRINT("pipe_num:%d pipe_type:%s pipe_ndx:%d rects:%d pipe_is_handoff:%d display_id:%d ",
			pipe->num, type, pipe->ndx, pipe->multirect.max_rects,
			pipe->is_handed_off, mdss_mdp_get_display_id(pipe));
		SPRINT("fmts_supported:");
		/* dump the per-format capability bytes, comma separated */
		for (j = 0; j < num_bytes; j++)
			SPRINT("%d,", pipe->supported_formats[j]);
		SPRINT("\n");
		/* advance past the extra rect entries of a multirect pipe */
		pipe += pipe->multirect.max_rects;
	}
#undef SPRINT
}
2324
/*
 * mdss_mdp_update_sspp_info() - dump capabilities of every SSPP class
 *
 * Appends capability lines for vig, rgb, dma and cursor pipes, in that
 * order, to the sysfs buffer via __update_sspp_info().
 */
static void mdss_mdp_update_sspp_info(struct mdss_data_type *mdata,
	char *buf, int *cnt)
{
	__update_sspp_info(mdata->vig_pipes, mdata->nvig_pipes,
		"vig", buf, cnt);
	__update_sspp_info(mdata->rgb_pipes, mdata->nrgb_pipes,
		"rgb", buf, cnt);
	__update_sspp_info(mdata->dma_pipes, mdata->ndma_pipes,
		"dma", buf, cnt);
	__update_sspp_info(mdata->cursor_pipes, mdata->ncursor_pipes,
		"cursor", buf, cnt);
}
2337
/*
 * mdss_mdp_update_wb_info() - append writeback/rotator format lists to the
 * capabilities buffer.
 *
 * NOTE(review): "rot_output_fmts" is printed from supported_input_formats
 * as well — possibly intentional (a rotator emits the same format it
 * consumes), but confirm it should not use a separate output-format table.
 */
static void mdss_mdp_update_wb_info(struct mdss_data_type *mdata,
	char *buf, int *cnt)
{
/* bounded append into buf; scnprintf never writes past len */
#define SPRINT(fmt, ...) \
	(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
	size_t len = PAGE_SIZE;
	int i;
	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

	/* the "&& mdata->wb" guard makes each list empty when wb is absent */
	SPRINT("rot_input_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	SPRINT("\nrot_output_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	SPRINT("\nwb_output_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_output_formats[i]);
	SPRINT("\n");
#undef SPRINT
}
2359
/*
 * mdss_mdp_show_capabilities() - sysfs "caps" show function
 *
 * Dumps the MDP hardware capabilities (pipe inventory, SMP configuration,
 * scaling limits, bandwidth limits and feature flags) one "key=value" or
 * "key:value" entry per line into @buf.
 *
 * Return: number of characters written (bounded by PAGE_SIZE).
 */
ssize_t mdss_mdp_show_capabilities(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	size_t len = PAGE_SIZE;
	int cnt = 0;

/* bounded append into buf; scnprintf never writes past len */
#define SPRINT(fmt, ...) \
	(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	SPRINT("mdp_version=5\n");
	SPRINT("hw_rev=%d\n", mdata->mdp_rev);
	SPRINT("pipe_count:%d\n", mdata->nvig_pipes + mdata->nrgb_pipes +
		mdata->ndma_pipes + mdata->ncursor_pipes);
	mdss_mdp_update_sspp_info(mdata, buf, &cnt);
	mdss_mdp_update_wb_info(mdata, buf, &cnt);
	/* TODO : need to remove num pipes info */
	SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
	SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
	SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
	SPRINT("blending_stages=%d\n", mdata->max_target_zorder);
	SPRINT("cursor_pipes=%d\n", mdata->ncursor_pipes);
	SPRINT("max_cursor_size=%d\n", mdata->max_cursor_size);
	SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
	SPRINT("smp_size=%d\n", mdata->smp_mb_size);
	SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe);
	SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
	SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);

	if (mdata->nwb)
		SPRINT("wb_intf_index=%d\n", mdata->nwb - 1);

	/* prefill factors: only when the simplified prefill QoS is in use */
	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("fmt_mt_nv12_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor);
		SPRINT("fmt_mt_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_factor);
		SPRINT("fmt_linear_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_linear_factor);
		SPRINT("scale_factor=%d\n",
			mdata->prefill_data.prefill_factors.scale_factor);
		SPRINT("xtra_ff_factor=%d\n",
			mdata->prefill_data.prefill_factors.xtra_ff_factor);
	}

	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("amortizable_threshold=%d\n",
			mdata->prefill_data.ts_threshold);
		SPRINT("system_overhead_lines=%d\n",
			mdata->prefill_data.ts_overhead);
	}

	/* optional limits below are emitted only when configured non-zero */
	if (mdata->props)
		SPRINT("props=%d\n", mdata->props);
	if (mdata->max_bw_low)
		SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low);
	if (mdata->max_bw_high)
		SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high);
	if (mdata->max_pipe_width)
		SPRINT("max_pipe_width=%d\n", mdata->max_pipe_width);
	if (mdata->max_mixer_width)
		SPRINT("max_mixer_width=%d\n", mdata->max_mixer_width);
	if (mdata->max_bw_per_pipe)
		SPRINT("max_pipe_bw=%u\n", mdata->max_bw_per_pipe);
	if (mdata->max_mdp_clk_rate)
		SPRINT("max_mdp_clk=%u\n", mdata->max_mdp_clk_rate);
	if (mdata->clk_factor.numer)
		SPRINT("clk_fudge_factor=%u,%u\n", mdata->clk_factor.numer,
			mdata->clk_factor.denom);
	if (mdata->has_rot_dwnscale) {
		if (mdata->rot_dwnscale_min)
			SPRINT("rot_dwnscale_min=%u\n",
				mdata->rot_dwnscale_min);
		if (mdata->rot_dwnscale_max)
			SPRINT("rot_dwnscale_max=%u\n",
				mdata->rot_dwnscale_max);
	}
	/* single space-separated feature-flag list on the last line */
	SPRINT("features=");
	if (mdata->has_bwc)
		SPRINT(" bwc");
	if (mdata->has_ubwc)
		SPRINT(" ubwc");
	if (mdata->has_wb_ubwc)
		SPRINT(" wb_ubwc");
	if (mdata->has_decimation)
		SPRINT(" decimation");
	if (mdata->highest_bank_bit && !mdss_mdp_is_ubwc_supported(mdata))
		SPRINT(" tile_format");
	if (mdata->has_non_scalar_rgb)
		SPRINT(" non_scalar_rgb");
	if (mdata->has_src_split)
		SPRINT(" src_split");
	if (mdata->has_rot_dwnscale)
		SPRINT(" rotator_downscale");
	if (mdata->max_bw_settings_cnt)
		SPRINT(" dynamic_bw_limit");
	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
		SPRINT(" qseed3");
	if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map))
		SPRINT(" dest_scaler");
	if (mdata->has_separate_rotator)
		SPRINT(" separate_rotator");
	if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED))
		SPRINT(" hdr");
	SPRINT("\n");
#undef SPRINT

	return cnt;
}
2469
2470static ssize_t mdss_mdp_read_max_limit_bw(struct device *dev,
2471 struct device_attribute *attr, char *buf)
2472{
2473 struct mdss_data_type *mdata = dev_get_drvdata(dev);
2474 size_t len = PAGE_SIZE;
2475 u32 cnt = 0;
2476 int i;
2477
2478 char bw_names[4][8] = {"default", "camera", "hflip", "vflip"};
2479 char pipe_bw_names[4][16] = {"default_pipe", "camera_pipe",
2480 "hflip_pipe", "vflip_pipe"};
2481 struct mdss_max_bw_settings *bw_settings;
2482 struct mdss_max_bw_settings *pipe_bw_settings;
2483
2484 bw_settings = mdata->max_bw_settings;
2485 pipe_bw_settings = mdata->max_per_pipe_bw_settings;
2486
2487#define SPRINT(fmt, ...) \
2488 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2489
2490 SPRINT("bw_mode_bitmap=%d\n", mdata->bw_mode_bitmap);
2491 SPRINT("bw_limit_pending=%d\n", mdata->bw_limit_pending);
2492
2493 for (i = 0; i < mdata->max_bw_settings_cnt; i++) {
2494 SPRINT("%s=%d\n", bw_names[i], bw_settings->mdss_max_bw_val);
2495 bw_settings++;
2496 }
2497
2498 for (i = 0; i < mdata->mdss_per_pipe_bw_cnt; i++) {
2499 SPRINT("%s=%d\n", pipe_bw_names[i],
2500 pipe_bw_settings->mdss_max_bw_val);
2501 pipe_bw_settings++;
2502 }
2503
2504 return cnt;
2505}
2506
2507static ssize_t mdss_mdp_store_max_limit_bw(struct device *dev,
2508 struct device_attribute *attr, const char *buf, size_t len)
2509{
2510 struct mdss_data_type *mdata = dev_get_drvdata(dev);
2511 u32 data = 0;
2512
2513 if (kstrtouint(buf, 0, &data)) {
2514 pr_info("Not able scan to bw_mode_bitmap\n");
2515 } else {
2516 mdata->bw_mode_bitmap = data;
2517 mdata->bw_limit_pending = true;
2518 pr_debug("limit use case, bw_mode_bitmap = %d\n", data);
2519 }
2520
2521 return len;
2522}
2523
/* Read-only hardware capability dump; see mdss_mdp_show_capabilities(). */
static DEVICE_ATTR(caps, 0444, mdss_mdp_show_capabilities, NULL);
/* Read/write bandwidth-limit mode bitmap (0664: owner/group writable). */
static DEVICE_ATTR(bw_mode_bitmap, 0664,
	mdss_mdp_read_max_limit_bw, mdss_mdp_store_max_limit_bw);

/* Attributes exposed under the mdp platform device's sysfs directory. */
static struct attribute *mdp_fs_attrs[] = {
	&dev_attr_caps.attr,
	&dev_attr_bw_mode_bitmap.attr,
	NULL
};

static struct attribute_group mdp_fs_attr_group = {
	.attrs = mdp_fs_attrs
};
2537
2538static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata)
2539{
2540 struct device *dev = &mdata->pdev->dev;
2541 int rc;
2542
2543 rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group);
2544
2545 return rc;
2546}
2547
2548int mdss_panel_get_intf_status(u32 disp_num, u32 intf_type)
2549{
2550 int rc, intf_status = 0;
2551 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2552
2553 if (!mdss_res || !mdss_res->pan_cfg.init_done)
2554 return -EPROBE_DEFER;
2555
2556 if (mdss_res->handoff_pending) {
2557 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2558 intf_status = readl_relaxed(mdata->mdp_base +
2559 MDSS_MDP_REG_DISP_INTF_SEL);
2560 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2561 if (intf_type == MDSS_PANEL_INTF_DSI) {
2562 if (disp_num == DISPLAY_1)
2563 rc = (intf_status & MDSS_MDP_INTF_DSI0_SEL);
2564 else if (disp_num == DISPLAY_2)
2565 rc = (intf_status & MDSS_MDP_INTF_DSI1_SEL);
2566 else
2567 rc = 0;
2568 } else if (intf_type == MDSS_PANEL_INTF_EDP) {
2569 intf_status &= MDSS_MDP_INTF_EDP_SEL;
2570 rc = (intf_status == MDSS_MDP_INTF_EDP_SEL);
2571 } else if (intf_type == MDSS_PANEL_INTF_HDMI) {
2572 intf_status &= MDSS_MDP_INTF_HDMI_SEL;
2573 rc = (intf_status == MDSS_MDP_INTF_HDMI_SEL);
2574 } else {
2575 rc = 0;
2576 }
2577 } else {
2578 rc = 0;
2579 }
2580
2581 return rc;
2582}
2583
/*
 * mdss_mdp_probe() - MDSS MDP platform driver probe
 * @pdev: MDP platform device (device-tree instantiated only)
 *
 * Maps the MDP/VBIF register regions, registers IRQs, parses the device
 * tree, sets up runtime PM, bus scaling, sysfs and debugfs, and performs
 * the continuous-splash handoff bookkeeping based on which interfaces the
 * bootloader left enabled.
 *
 * Return: 0 on success or a negative errno. Errors routed through
 * probe_done unwind the partially-initialized state.
 */
static int mdss_mdp_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc;
	struct mdss_data_type *mdata;
	uint32_t intf_sel = 0;
	uint32_t split_display = 0;
	int num_of_display_on = 0;
	int i = 0;

	if (!pdev->dev.of_node) {
		pr_err("MDP driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	/* singleton: mdss_res is the one global driver instance */
	if (mdss_res) {
		pr_err("MDP already initialized\n");
		return -EINVAL;
	}

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (mdata == NULL)
		return -ENOMEM;

	pdev->id = 0;
	mdata->pdev = pdev;
	platform_set_drvdata(pdev, mdata);
	mdss_res = mdata;
	mutex_init(&mdata->reg_lock);
	mutex_init(&mdata->reg_bus_lock);
	mutex_init(&mdata->bus_lock);
	INIT_LIST_HEAD(&mdata->reg_bus_clist);
	atomic_set(&mdata->sd_client_count, 0);
	atomic_set(&mdata->active_intf_cnt, 0);

	mdss_res->mdss_util = mdss_get_util_intf();
	if (mdss_res->mdss_util == NULL) {
		pr_err("Failed to get mdss utility functions\n");
		return -ENODEV;
	}

	/* hook up utility callbacks consumed by the other mdss sub-drivers */
	mdss_res->mdss_util->get_iommu_domain = mdss_smmu_get_domain_id;
	mdss_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
	mdss_res->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
	mdss_res->mdss_util->bus_scale_set_quota = mdss_bus_scale_set_quota;
	mdss_res->mdss_util->bus_bandwidth_ctrl = mdss_bus_bandwidth_ctrl;
	mdss_res->mdss_util->panel_intf_type = mdss_panel_intf_type;
	mdss_res->mdss_util->panel_intf_status = mdss_panel_get_intf_status;

	rc = msm_dss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys");
	if (rc) {
		pr_err("unable to map MDP base\n");
		goto probe_done;
	}
	pr_debug("MDSS HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->mdss_io.base,
		mdata->mdss_io.len);

	rc = msm_dss_ioremap_byname(pdev, &mdata->vbif_io, "vbif_phys");
	if (rc) {
		pr_err("unable to map MDSS VBIF base\n");
		goto probe_done;
	}
	pr_debug("MDSS VBIF HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->vbif_io.base,
		mdata->vbif_io.len);

	/* non-realtime VBIF region is optional on some targets */
	rc = msm_dss_ioremap_byname(pdev, &mdata->vbif_nrt_io, "vbif_nrt_phys");
	if (rc)
		pr_debug("unable to map MDSS VBIF non-realtime base\n");
	else
		pr_debug("MDSS VBIF NRT HW Base addr=%pK len=0x%x\n",
			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get MDSS irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	/*
	 * NOTE(review): this and the two kcalloc failures near the end
	 * return directly, skipping the probe_done cleanup (mdss_res stays
	 * set, footswitch/PM votes are not unwound) — confirm intended.
	 */
	mdss_mdp_hw.irq_info = kcalloc(1, sizeof(struct irq_info), GFP_KERNEL);
	if (!mdss_mdp_hw.irq_info)
		return -ENOMEM;

	mdss_mdp_hw.irq_info->irq = res->start;
	mdss_mdp_hw.ptr = mdata;

	/* export misc. interrupts to external driver */
	mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
			&mdss_irq_domain_ops, mdata);
	if (!mdata->irq_domain) {
		pr_err("unable to add linear domain\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	mdss_misc_hw.irq_info = mdss_intr_line();
	rc = mdss_res->mdss_util->register_irq(&mdss_misc_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_mdp_res_init(mdata);
	if (rc) {
		pr_err("unable to initialize mdss mdp resources\n");
		goto probe_done;
	}

	/* runtime PM: autosuspend only when idle power collapse is enabled */
	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
	if (mdata->idle_pc_enabled)
		pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		mdss_mdp_footswitch_ctrl(mdata, true);

	rc = mdss_mdp_bus_scale_register(mdata);
	if (rc) {
		pr_err("unable to register bus scaling\n");
		goto probe_done;
	}

	/*
	 * enable clocks and read mdp_rev as soon as possible once
	 * kernel is up.
	 */
	mdss_mdp_footswitch_ctrl_splash(true);
	mdss_hw_rev_init(mdata);

	/*populate hw iomem base info from device tree*/
	rc = mdss_mdp_parse_dt(pdev);
	if (rc) {
		pr_err("unable to parse device tree\n");
		goto probe_done;
	}

	rc = mdss_mdp_get_cmdline_config(pdev);
	if (rc) {
		pr_err("Error in panel override:rc=[%d]\n", rc);
		goto probe_done;
	}

	rc = mdss_mdp_debug_init(pdev, mdata);
	if (rc) {
		pr_err("unable to initialize mdp debugging\n");
		goto probe_done;
	}
	rc = mdss_mdp_scaler_init(mdata, &pdev->dev);
	if (rc)
		goto probe_done;

	/* sysfs/irq/smmu failures below are logged but non-fatal */
	rc = mdss_mdp_register_sysfs(mdata);
	if (rc)
		pr_err("unable to register mdp sysfs nodes\n");

	rc = mdss_fb_register_mdp_instance(&mdp5);
	if (rc)
		pr_err("unable to register mdp instance\n");

	rc = mdss_res->mdss_util->register_irq(&mdss_mdp_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_smmu_init(mdata, &pdev->dev);
	if (rc)
		pr_err("mdss smmu init failed\n");

	mdss_mdp_set_supported_formats(mdata);

	mdss_res->mdss_util->mdp_probe_done = true;

	mdss_hw_init(mdata);

	rc = mdss_mdp_pp_init(&pdev->dev);
	if (rc)
		pr_err("unable to initialize mdss pp resources\n");

	/* Restoring Secure configuration during boot-up */
	if (mdss_mdp_req_init_restore_cfg(mdata))
		__mdss_restore_sec_cfg(mdata);

	/* keep the bootloader's panic/robust LUTs so they can be restored */
	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
		mdata->default_panic_lut0 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT0);
		mdata->default_panic_lut1 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT1);
		mdata->default_robust_lut = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_ROBUST_LUT);
	}

	/*
	 * Read the DISP_INTF_SEL register to check if display was enabled in
	 * bootloader or not. If yes, let handoff handle removing the extra
	 * clk/regulator votes else turn off clk/regulators because purpose
	 * here is to get mdp_rev.
	 */
	intf_sel = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	split_display = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
	mdata->splash_intf_sel = intf_sel;
	mdata->splash_split_disp = split_display;

	/* one byte of DISP_INTF_SEL per interface; count active ones */
	if (intf_sel != 0) {
		for (i = 0; i < 4; i++)
			if ((intf_sel >> i*8) & 0x000000FF)
				num_of_display_on++;

		/*
		 * For split display enabled - DSI0, DSI1 interfaces are
		 * considered as single display. So decrement
		 * 'num_of_display_on' by 1
		 */
		if (split_display)
			num_of_display_on--;
	}
	if (!num_of_display_on) {
		mdss_mdp_footswitch_ctrl_splash(false);
		msm_bus_scale_client_update_request(
				mdata->bus_hdl, 0);
		mdata->ao_bw_uc_idx = 0;
	} else {
		mdata->handoff_pending = true;
		/*
		 * If multiple displays are enabled in LK, ctrl_splash off will
		 * be called multiple times during splash_cleanup. Need to
		 * enable it symmetrically
		 */
		for (i = 1; i < num_of_display_on; i++)
			mdss_mdp_footswitch_ctrl_splash(true);
	}

	/* NOTE(review): direct returns here skip probe_done cleanup too */
	mdp_intr_cb  = kcalloc(ARRAY_SIZE(mdp_irq_map),
			sizeof(struct intr_callback), GFP_KERNEL);
	if (mdp_intr_cb == NULL)
		return -ENOMEM;

	mdss_res->mdp_irq_mask = kcalloc(ARRAY_SIZE(mdp_intr_reg),
			sizeof(u32), GFP_KERNEL);
	if (mdss_res->mdp_irq_mask == NULL)
		return -ENOMEM;

	pr_info("mdss version = 0x%x, bootloader display is %s, num %d, intf_sel=0x%08x\n",
		mdata->mdp_rev, num_of_display_on ? "on" : "off",
		num_of_display_on, intf_sel);

probe_done:
	if (IS_ERR_VALUE((unsigned long)rc)) {
		if (!num_of_display_on)
			mdss_mdp_footswitch_ctrl_splash(false);

		if (mdata->regulator_notif_register)
			regulator_unregister_notifier(mdata->fs,
						&(mdata->gdsc_cb));
		mdss_mdp_hw.ptr = NULL;
		mdss_mdp_pp_term(&pdev->dev);
		mutex_destroy(&mdata->reg_lock);
		mdss_res = NULL;
	}

	return rc;
}
2846
2847static void mdss_mdp_parse_dt_regs_array(const u32 *arr, struct dss_io_data *io,
2848 struct mdss_hw_settings *hws, int count)
2849{
2850 u32 len, reg;
2851 int i;
2852
2853 if (!arr)
2854 return;
2855
2856 for (i = 0, len = count * 2; i < len; i += 2) {
2857 reg = be32_to_cpu(arr[i]);
2858 if (reg >= io->len)
2859 continue;
2860
2861 hws->reg = io->base + reg;
2862 hws->val = be32_to_cpu(arr[i + 1]);
2863 pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
2864 hws++;
2865 }
2866}
2867
2868int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
2869{
2870 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2871 struct mdss_hw_settings *hws;
2872 const u32 *vbif_arr, *mdp_arr, *vbif_nrt_arr;
2873 int vbif_len, mdp_len, vbif_nrt_len;
2874
2875 vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
2876 &vbif_len);
2877 if (!vbif_arr || (vbif_len & 1)) {
2878 pr_debug("MDSS VBIF settings not found\n");
2879 vbif_len = 0;
2880 }
2881 vbif_len /= 2 * sizeof(u32);
2882
2883 vbif_nrt_arr = of_get_property(pdev->dev.of_node,
2884 "qcom,vbif-nrt-settings", &vbif_nrt_len);
2885 if (!vbif_nrt_arr || (vbif_nrt_len & 1)) {
2886 pr_debug("MDSS VBIF non-realtime settings not found\n");
2887 vbif_nrt_len = 0;
2888 }
2889 vbif_nrt_len /= 2 * sizeof(u32);
2890
2891 mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
2892 &mdp_len);
2893 if (!mdp_arr || (mdp_len & 1)) {
2894 pr_debug("MDSS MDP settings not found\n");
2895 mdp_len = 0;
2896 }
2897 mdp_len /= 2 * sizeof(u32);
2898
2899 if (!(mdp_len + vbif_len + vbif_nrt_len))
2900 return 0;
2901
2902 hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len +
2903 vbif_nrt_len + 1), GFP_KERNEL);
2904 if (!hws)
2905 return -ENOMEM;
2906
2907 mdss_mdp_parse_dt_regs_array(vbif_arr, &mdata->vbif_io,
2908 hws, vbif_len);
2909 mdss_mdp_parse_dt_regs_array(vbif_nrt_arr, &mdata->vbif_nrt_io,
2910 hws, vbif_nrt_len);
2911 mdss_mdp_parse_dt_regs_array(mdp_arr, &mdata->mdss_io,
2912 hws + vbif_len, mdp_len);
2913
2914 mdata->hw_settings = hws;
2915
2916 return 0;
2917}
2918
2919static int mdss_mdp_parse_dt(struct platform_device *pdev)
2920{
2921 int rc, data;
2922 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2923
2924 rc = mdss_mdp_parse_dt_hw_settings(pdev);
2925 if (rc) {
2926 pr_err("Error in device tree : hw settings\n");
2927 return rc;
2928 }
2929
2930 rc = mdss_mdp_parse_dt_pipe(pdev);
2931 if (rc) {
2932 pr_err("Error in device tree : pipes\n");
2933 return rc;
2934 }
2935
2936 rc = mdss_mdp_parse_dt_mixer(pdev);
2937 if (rc) {
2938 pr_err("Error in device tree : mixers\n");
2939 return rc;
2940 }
2941
2942 rc = mdss_mdp_parse_dt_misc(pdev);
2943 if (rc) {
2944 pr_err("Error in device tree : misc\n");
2945 return rc;
2946 }
2947
2948 rc = mdss_mdp_parse_dt_wb(pdev);
2949 if (rc) {
2950 pr_err("Error in device tree : wb\n");
2951 return rc;
2952 }
2953
2954 rc = mdss_mdp_parse_dt_ctl(pdev);
2955 if (rc) {
2956 pr_err("Error in device tree : ctl\n");
2957 return rc;
2958 }
2959
2960 rc = mdss_mdp_parse_dt_video_intf(pdev);
2961 if (rc) {
2962 pr_err("Error in device tree : ctl\n");
2963 return rc;
2964 }
2965
2966 rc = mdss_mdp_parse_dt_smp(pdev);
2967 if (rc) {
2968 pr_err("Error in device tree : smp\n");
2969 return rc;
2970 }
2971
2972 rc = mdss_mdp_parse_dt_prefill(pdev);
2973 if (rc) {
2974 pr_err("Error in device tree : prefill\n");
2975 return rc;
2976 }
2977
2978 rc = mdss_mdp_parse_dt_ad_cfg(pdev);
2979 if (rc) {
2980 pr_err("Error in device tree : ad\n");
2981 return rc;
2982 }
2983
2984 rc = mdss_mdp_parse_dt_cdm(pdev);
2985 if (rc)
2986 pr_debug("CDM offset not found in device tree\n");
2987
2988 rc = mdss_mdp_parse_dt_dsc(pdev);
2989 if (rc)
2990 pr_debug("DSC offset not found in device tree\n");
2991
2992 /* Parse the mdp specific register base offset*/
2993 rc = of_property_read_u32(pdev->dev.of_node,
2994 "qcom,mdss-mdp-reg-offset", &data);
2995 if (rc) {
2996 pr_err("Error in device tree : mdp reg base\n");
2997 return rc;
2998 }
2999 mdata->mdp_base = mdata->mdss_io.base + data;
3000 return 0;
3001}
3002
3003static void mdss_mdp_parse_dt_pipe_sw_reset(struct platform_device *pdev,
3004 u32 reg_off, char *prop_name, struct mdss_mdp_pipe *pipe_list,
3005 u32 npipes)
3006{
3007 int len;
3008 const u32 *arr;
3009
3010 arr = of_get_property(pdev->dev.of_node, prop_name, &len);
3011 if (arr) {
3012 int i;
3013
3014 len /= sizeof(u32);
3015 if (len != npipes) {
3016 pr_err("%s: invalid sw_reset entries req:%d found:%d\n",
3017 prop_name, len, npipes);
3018 return;
3019 }
3020
3021 for (i = 0; i < len; i++) {
3022 pipe_list[i].sw_reset.reg_off = reg_off;
3023 pipe_list[i].sw_reset.bit_off = be32_to_cpu(arr[i]);
3024
3025 pr_debug("%s[%d]: sw_reset: reg_off:0x%x bit_off:%d\n",
3026 prop_name, i, reg_off, be32_to_cpu(arr[i]));
3027 }
3028 }
3029}
3030
3031static int mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
3032 char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
3033{
3034 int rc = 0, len;
3035 const u32 *arr;
3036
3037 arr = of_get_property(pdev->dev.of_node, prop_name, &len);
3038 if (arr) {
3039 int i, j;
3040
3041 len /= sizeof(u32);
3042 for (i = 0, j = 0; i < len; j++) {
3043 struct mdss_mdp_pipe *pipe = NULL;
3044
3045 if (j >= npipes) {
3046 pr_err("invalid clk ctrl enries for prop: %s\n",
3047 prop_name);
3048 return -EINVAL;
3049 }
3050
3051 pipe = &pipe_list[j];
3052
3053 pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
3054 pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);
3055
3056 /* status register is next in line to ctrl register */
3057 pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
3058 pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);
3059
3060 pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
3061 prop_name, j, pipe->clk_ctrl.reg_off,
3062 pipe->clk_ctrl.bit_off);
3063 pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
3064 prop_name, j, pipe->clk_status.reg_off,
3065 pipe->clk_status.bit_off);
3066 }
3067 if (j != npipes) {
3068 pr_err("%s: %d entries found. required %d\n",
3069 prop_name, j, npipes);
3070 for (i = 0; i < npipes; i++) {
3071 memset(&pipe_list[i].clk_ctrl, 0,
3072 sizeof(pipe_list[i].clk_ctrl));
3073 memset(&pipe_list[i].clk_status, 0,
3074 sizeof(pipe_list[i].clk_status));
3075 }
3076 rc = -EINVAL;
3077 }
3078 } else {
3079 pr_err("error mandatory property '%s' not found\n", prop_name);
3080 rc = -EINVAL;
3081 }
3082
3083 return rc;
3084}
3085
3086static void mdss_mdp_parse_dt_pipe_panic_ctrl(struct platform_device *pdev,
3087 char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
3088{
3089 int i, j;
3090 int len;
3091 const u32 *arr;
3092 struct mdss_mdp_pipe *pipe = NULL;
3093
3094 arr = of_get_property(pdev->dev.of_node, prop_name, &len);
3095 if (arr) {
3096 len /= sizeof(u32);
3097 for (i = 0, j = 0; i < len; j++) {
3098 if (j >= npipes) {
3099 pr_err("invalid panic ctrl enries for prop: %s\n",
3100 prop_name);
3101 return;
3102 }
3103
3104 pipe = &pipe_list[j];
3105 pipe->panic_ctrl_ndx = be32_to_cpu(arr[i++]);
3106 }
3107 if (j != npipes)
3108 pr_err("%s: %d entries found. required %d\n",
3109 prop_name, j, npipes);
3110 } else {
3111 pr_debug("panic ctrl enabled but property '%s' not found\n",
3112 prop_name);
3113 }
3114}
3115
/*
 * mdss_mdp_parse_dt_pipe_helper() - parse and set up one class of SSPPs
 * @pdev: mdss platform device
 * @ptype: pipe type being parsed (MDSS_MDP_PIPE_TYPE_*)
 * @ptypestr: pipe type name used to build the DT property names
 * @out_plist: output; points at the allocated pipe array (NULL when the
 *             hardware has no pipe of this type)
 * @len: number of pipes of this type advertised in DT
 * @priority_base: starting fetch priority for this pipe class
 *
 * Return: number of pipes set up, 0 when none exist, negative errno on a
 * parse or allocation failure (the pipe array is freed again in that case).
 */
static int mdss_mdp_parse_dt_pipe_helper(struct platform_device *pdev,
		u32 ptype, char *ptypestr,
		struct mdss_mdp_pipe **out_plist,
		size_t len,
		u8 priority_base)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 offsets[MDSS_MDP_MAX_SSPP];
	u32 ftch_id[MDSS_MDP_MAX_SSPP];
	u32 xin_id[MDSS_MDP_MAX_SSPP];
	u32 pnums[MDSS_MDP_MAX_SSPP];
	struct mdss_mdp_pipe *pipe_list;
	char prop_name[64];
	int i, cnt, rc;
	u32 rects_per_sspp;

	if (!out_plist)
		return -EINVAL;

	/* collect the hardware pipe numbers belonging to this type */
	for (i = 0, cnt = 0; i < MDSS_MDP_MAX_SSPP && cnt < len; i++) {
		if (ptype == get_pipe_type_from_num(i)) {
			pnums[cnt] = i;
			cnt++;
		}
	}

	if (cnt < len)
		pr_warn("Invalid %s pipe count: %zu, max supported: %d\n",
				ptypestr, len, cnt);
	if (cnt == 0) {
		*out_plist = NULL;

		return 0;
	}

	/* by default works in single rect mode unless otherwise noted */
	rects_per_sspp = mdata->rects_per_sspp[ptype] ? : 1;

	/* multirect pipes get one mdss_mdp_pipe entry per rect */
	pipe_list = devm_kzalloc(&pdev->dev,
			(sizeof(struct mdss_mdp_pipe) * cnt * rects_per_sspp),
			GFP_KERNEL);
	if (!pipe_list)
		return -ENOMEM;

	/* no fetch ids with pixel RAM or for cursor pipes; mark unused */
	if (mdata->has_pixel_ram || (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) {
		for (i = 0; i < cnt; i++)
			ftch_id[i] = -1;
	} else {
		snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-fetch-id", ptypestr);
		rc = mdss_mdp_parse_dt_handler(pdev, prop_name, ftch_id,
				cnt);
		if (rc)
			goto parse_fail;
	}

	snprintf(prop_name, sizeof(prop_name),
		"qcom,mdss-pipe-%s-xin-id", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, xin_id, cnt);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
		"qcom,mdss-pipe-%s-off", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, offsets, cnt);
	if (rc)
		goto parse_fail;

	rc = mdss_mdp_pipe_addr_setup(mdata, pipe_list, offsets, ftch_id,
			xin_id, ptype, pnums, cnt, rects_per_sspp,
			priority_base);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
		"qcom,mdss-pipe-%s-clk-ctrl-offsets", ptypestr);
	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, prop_name,
			pipe_list, cnt);
	if (rc)
		goto parse_fail;

	*out_plist = pipe_list;

	return cnt;
parse_fail:
	devm_kfree(&pdev->dev, pipe_list);

	return rc;
}
3205
3206static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
3207{
3208 int rc = 0;
3209 u32 nfids = 0, len, nxids = 0, npipes = 0;
3210 u32 sw_reset_offset = 0;
3211 u32 data[4];
3212
3213 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3214
3215 mdata->has_pixel_ram = !mdss_mdp_parse_dt_prop_len(pdev,
3216 "qcom,mdss-smp-data");
3217
3218 mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3219 "qcom,mdss-pipe-vig-off");
3220 mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3221 "qcom,mdss-pipe-rgb-off");
3222 mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3223 "qcom,mdss-pipe-dma-off");
3224 mdata->ncursor_pipes = mdss_mdp_parse_dt_prop_len(pdev,
3225 "qcom,mdss-pipe-cursor-off");
3226
3227 npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;
3228
3229 if (!mdata->has_pixel_ram) {
3230 nfids += mdss_mdp_parse_dt_prop_len(pdev,
3231 "qcom,mdss-pipe-vig-fetch-id");
3232 nfids += mdss_mdp_parse_dt_prop_len(pdev,
3233 "qcom,mdss-pipe-rgb-fetch-id");
3234 nfids += mdss_mdp_parse_dt_prop_len(pdev,
3235 "qcom,mdss-pipe-dma-fetch-id");
3236 if (npipes != nfids) {
3237 pr_err("device tree err: unequal number of pipes and smp ids");
3238 return -EINVAL;
3239 }
3240 }
3241
3242 if (mdata->nvig_pipes)
3243 nxids += mdss_mdp_parse_dt_prop_len(pdev,
3244 "qcom,mdss-pipe-vig-xin-id");
3245 if (mdata->nrgb_pipes)
3246 nxids += mdss_mdp_parse_dt_prop_len(pdev,
3247 "qcom,mdss-pipe-rgb-xin-id");
3248 if (mdata->ndma_pipes)
3249 nxids += mdss_mdp_parse_dt_prop_len(pdev,
3250 "qcom,mdss-pipe-dma-xin-id");
3251 if (npipes != nxids) {
3252 pr_err("device tree err: unequal number of pipes and xin ids\n");
3253 return -EINVAL;
3254 }
3255
3256 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_VIG, "vig",
3257 &mdata->vig_pipes, mdata->nvig_pipes, 0);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303258 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303259 goto parse_fail;
3260 mdata->nvig_pipes = rc;
3261
3262 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_RGB, "rgb",
3263 &mdata->rgb_pipes, mdata->nrgb_pipes,
3264 mdata->nvig_pipes);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303265 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303266 goto parse_fail;
3267 mdata->nrgb_pipes = rc;
3268
3269 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_DMA, "dma",
3270 &mdata->dma_pipes, mdata->ndma_pipes,
3271 mdata->nvig_pipes + mdata->nrgb_pipes);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303272 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303273 goto parse_fail;
3274 mdata->ndma_pipes = rc;
3275
3276 rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_CURSOR,
3277 "cursor", &mdata->cursor_pipes, mdata->ncursor_pipes,
3278 0);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05303279 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05303280 goto parse_fail;
3281 mdata->ncursor_pipes = rc;
3282
3283 rc = 0;
3284
3285 mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-sw-reset-off",
3286 &sw_reset_offset, 1);
3287 if (sw_reset_offset) {
3288 if (mdata->vig_pipes)
3289 mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
3290 "qcom,mdss-pipe-vig-sw-reset-map",
3291 mdata->vig_pipes, mdata->nvig_pipes);
3292 if (mdata->rgb_pipes)
3293 mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
3294 "qcom,mdss-pipe-rgb-sw-reset-map",
3295 mdata->rgb_pipes, mdata->nrgb_pipes);
3296 if (mdata->dma_pipes)
3297 mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
3298 "qcom,mdss-pipe-dma-sw-reset-map",
3299 mdata->dma_pipes, mdata->ndma_pipes);
3300 }
3301
3302 mdata->has_panic_ctrl = of_property_read_bool(pdev->dev.of_node,
3303 "qcom,mdss-has-panic-ctrl");
3304 if (mdata->has_panic_ctrl) {
3305 if (mdata->vig_pipes)
3306 mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
3307 "qcom,mdss-pipe-vig-panic-ctrl-offsets",
3308 mdata->vig_pipes, mdata->nvig_pipes);
3309 if (mdata->rgb_pipes)
3310 mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
3311 "qcom,mdss-pipe-rgb-panic-ctrl-offsets",
3312 mdata->rgb_pipes, mdata->nrgb_pipes);
3313 if (mdata->dma_pipes)
3314 mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
3315 "qcom,mdss-pipe-dma-panic-ctrl-offsets",
3316 mdata->dma_pipes, mdata->ndma_pipes);
3317 }
3318
3319 len = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-per-pipe-panic-luts");
3320 if (len != 4) {
3321 pr_debug("Unable to read per-pipe-panic-luts\n");
3322 } else {
3323 rc = mdss_mdp_parse_dt_handler(pdev,
3324 "qcom,mdss-per-pipe-panic-luts", data, len);
3325 mdata->default_panic_lut_per_pipe_linear = data[0];
3326 mdata->default_panic_lut_per_pipe_tile = data[1];
3327 mdata->default_robust_lut_per_pipe_linear = data[2];
3328 mdata->default_robust_lut_per_pipe_tile = data[3];
3329 pr_debug("per pipe panic lut [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
3330 data[0], data[1], data[2], data[3]);
3331 }
3332
3333parse_fail:
3334 return rc;
3335}
3336
/*
 * mdss_mdp_parse_dt_mixer() - parse layer mixer, DSPP and pingpong block
 * configuration from the device tree and register the mixers.
 *
 * Validates that the DT counts are consistent (at least as many interface
 * mixers as DSPPs, exactly one pingpong per interface mixer). If no
 * dedicated writeback mixers exist and there is no separate rotator,
 * virtual writeback mixers are created by reusing the register ranges of
 * the last interface mixers (one per DMA pipe).
 *
 * Returns 0 on success or a negative error code.
 */
static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
{

	u32 nmixers, npingpong;
	int rc = 0;
	u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
		*pingpong_offsets = NULL;
	u32 is_virtual_mixer_req = false;

	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-intf-off");
	mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-wb-off");
	mdata->ndspp = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-dspp-off");
	npingpong = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pingpong-off");
	nmixers = mdata->nmixers_intf + mdata->nmixers_wb;

	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,max-mixer-width", &mdata->max_mixer_width);
	if (rc) {
		pr_err("device tree err: failed to get max mixer width\n");
		return -EINVAL;
	}

	/* each DSPP is attached to an interface mixer */
	if (mdata->nmixers_intf < mdata->ndspp) {
		pr_err("device tree err: no of dspp are greater than intf mixers\n");
		return -EINVAL;
	}

	/* each interface mixer needs exactly one pingpong block */
	if (mdata->nmixers_intf != npingpong) {
		pr_err("device tree err: unequal no of pingpong and intf mixers\n");
		return -EINVAL;
	}

	/* temporary offset arrays, freed via the cleanup labels below */
	mixer_offsets = kcalloc(nmixers, sizeof(u32), GFP_KERNEL);
	if (!mixer_offsets)
		return -ENOMEM;

	dspp_offsets = kcalloc(mdata->ndspp, sizeof(u32), GFP_KERNEL);
	if (!dspp_offsets) {
		rc = -ENOMEM;
		goto dspp_alloc_fail;
	}
	pingpong_offsets = kcalloc(npingpong, sizeof(u32), GFP_KERNEL);
	if (!pingpong_offsets) {
		rc = -ENOMEM;
		goto pingpong_alloc_fail;
	}

	/* interface mixer offsets occupy the front of mixer_offsets */
	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
			mixer_offsets, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	mdata->has_separate_rotator = of_property_read_bool(pdev->dev.of_node,
			"qcom,mdss-has-separate-rotator");
	if (mdata->nmixers_wb) {
		/* writeback mixer offsets follow the interface ones */
		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
				mixer_offsets + mdata->nmixers_intf,
				mdata->nmixers_wb);
		if (rc)
			goto parse_done;
	} else if (!mdata->has_separate_rotator) {
		/*
		 * If writeback mixers are not available, put the number of
		 * writeback mixers equal to number of DMA pipes so that
		 * later same number of virtual writeback mixers can be
		 * allocated.
		 */
		mdata->nmixers_wb = mdata->ndma_pipes;
		is_virtual_mixer_req = true;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
		dspp_offsets, mdata->ndspp);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
		pingpong_offsets, npingpong);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
			dspp_offsets, pingpong_offsets,
			MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	if (mdata->nmixers_wb) {
		if (is_virtual_mixer_req) {
			/*
			 * Replicate last interface mixers based on number of
			 * dma pipes available as virtual writeback mixers.
			 */
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf - mdata->ndma_pipes,
					NULL, NULL, MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		} else {
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf, NULL, NULL,
					MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		}
	}

parse_done:
	kfree(pingpong_offsets);
pingpong_alloc_fail:
	kfree(dspp_offsets);
dspp_alloc_fail:
	kfree(mixer_offsets);

	return rc;
}
3461
3462static int mdss_mdp_cdm_addr_setup(struct mdss_data_type *mdata,
3463 u32 *cdm_offsets, u32 len)
3464{
3465 struct mdss_mdp_cdm *head;
3466 u32 i = 0;
3467
3468 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_cdm) *
3469 len, GFP_KERNEL);
3470 if (!head)
3471 return -ENOMEM;
3472
3473 for (i = 0; i < len; i++) {
3474 head[i].num = i;
3475 head[i].base = (mdata->mdss_io.base) + cdm_offsets[i];
3476 atomic_set(&head[i].kref.refcount, 0);
3477 mutex_init(&head[i].lock);
3478 init_completion(&head[i].free_comp);
3479 pr_debug("%s: cdm off (%d) = %pK\n", __func__, i, head[i].base);
3480 }
3481
3482 mdata->cdm_off = head;
3483 mutex_init(&mdata->cdm_lock);
3484 return 0;
3485}
3486
3487static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev)
3488{
3489 int rc = 0;
3490 u32 *cdm_offsets = NULL;
3491 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3492
3493 mdata->ncdm = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-cdm-off");
3494
3495 if (!mdata->ncdm) {
3496 rc = 0;
3497 pr_debug("%s: No CDM offsets present in DT\n", __func__);
3498 goto end;
3499 }
3500 pr_debug("%s: cdm len == %d\n", __func__, mdata->ncdm);
3501 cdm_offsets = kcalloc(mdata->ncdm, sizeof(u32), GFP_KERNEL);
3502 if (!cdm_offsets) {
3503 rc = -ENOMEM;
3504 mdata->ncdm = 0;
3505 goto end;
3506 }
3507
3508 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-cdm-off", cdm_offsets,
3509 mdata->ncdm);
3510 if (rc) {
3511 pr_err("device tree err: failed to get cdm offsets\n");
3512 goto fail;
3513 }
3514
3515 rc = mdss_mdp_cdm_addr_setup(mdata, cdm_offsets, mdata->ncdm);
3516 if (rc) {
3517 pr_err("%s: CDM address setup failed\n", __func__);
3518 goto fail;
3519 }
3520
3521fail:
3522 kfree(cdm_offsets);
3523 if (rc)
3524 mdata->ncdm = 0;
3525end:
3526 return rc;
3527}
3528
3529static int mdss_mdp_dsc_addr_setup(struct mdss_data_type *mdata,
3530 u32 *dsc_offsets, u32 len)
3531{
3532 struct mdss_mdp_dsc *head;
3533 u32 i = 0;
3534
3535 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_dsc) *
3536 len, GFP_KERNEL);
3537 if (!head)
3538 return -ENOMEM;
3539
3540 for (i = 0; i < len; i++) {
3541 head[i].num = i;
3542 head[i].base = (mdata->mdss_io.base) + dsc_offsets[i];
3543 pr_debug("dsc off (%d) = %pK\n", i, head[i].base);
3544 }
3545
3546 mdata->dsc_off = head;
3547 return 0;
3548}
3549
3550static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev)
3551{
3552 int rc = 0;
3553 u32 *dsc_offsets = NULL;
3554 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3555
3556 mdata->ndsc = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-dsc-off");
3557 if (!mdata->ndsc) {
3558 rc = 0;
3559 pr_debug("No DSC offsets present in DT\n");
3560 goto end;
3561 }
3562 pr_debug("dsc len == %d\n", mdata->ndsc);
3563
3564 dsc_offsets = kcalloc(mdata->ndsc, sizeof(u32), GFP_KERNEL);
3565 if (!dsc_offsets) {
3566 rc = -ENOMEM;
3567 mdata->ndsc = 0;
3568 goto end;
3569 }
3570
3571 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dsc-off", dsc_offsets,
3572 mdata->ndsc);
3573 if (rc) {
3574 pr_err("device tree err: failed to get cdm offsets\n");
3575 goto fail;
3576 }
3577
3578 rc = mdss_mdp_dsc_addr_setup(mdata, dsc_offsets, mdata->ndsc);
3579 if (rc) {
3580 pr_err("%s: DSC address setup failed\n", __func__);
3581 goto fail;
3582 }
3583
3584fail:
3585 kfree(dsc_offsets);
3586 if (rc)
3587 mdata->ndsc = 0;
3588end:
3589 return rc;
3590}
3591
3592static int mdss_mdp_parse_dt_wb(struct platform_device *pdev)
3593{
3594 int rc = 0;
3595 u32 *wb_offsets = NULL;
3596 u32 num_wb_mixer, nwb_offsets, num_intf_wb = 0;
3597 const char *wfd_data;
3598 struct mdss_data_type *mdata;
3599
3600 mdata = platform_get_drvdata(pdev);
3601
3602 num_wb_mixer = mdata->nmixers_wb;
3603
3604 wfd_data = of_get_property(pdev->dev.of_node,
3605 "qcom,mdss-wfd-mode", NULL);
3606 if (wfd_data && strcmp(wfd_data, "shared") != 0)
3607 num_intf_wb = 1;
3608
3609 nwb_offsets = mdss_mdp_parse_dt_prop_len(pdev,
3610 "qcom,mdss-wb-off");
3611
3612 wb_offsets = kcalloc(nwb_offsets, sizeof(u32), GFP_KERNEL);
3613 if (!wb_offsets)
3614 return -ENOMEM;
3615
3616 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
3617 wb_offsets, nwb_offsets);
3618 if (rc)
3619 goto wb_parse_done;
3620
3621 rc = mdss_mdp_wb_addr_setup(mdata, num_wb_mixer, num_intf_wb);
3622 if (rc)
3623 goto wb_parse_done;
3624
3625 mdata->nwb_offsets = nwb_offsets;
3626 mdata->wb_offsets = wb_offsets;
3627
3628 return 0;
3629
3630wb_parse_done:
3631 kfree(wb_offsets);
3632 return rc;
3633}
3634
3635static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
3636{
3637 int rc = 0;
3638 u32 *ctl_offsets = NULL;
3639
3640 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3641
3642 mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
3643 "qcom,mdss-ctl-off");
3644
3645 if (mdata->nctl < mdata->nwb) {
3646 pr_err("device tree err: number of ctl greater than wb\n");
3647 rc = -EINVAL;
3648 goto parse_done;
3649 }
3650
3651 ctl_offsets = kcalloc(mdata->nctl, sizeof(u32), GFP_KERNEL);
3652 if (!ctl_offsets)
3653 return -ENOMEM;
3654
3655 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
3656 ctl_offsets, mdata->nctl);
3657 if (rc)
3658 goto parse_done;
3659
3660 rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, mdata->nctl);
3661 if (rc)
3662 goto parse_done;
3663
3664parse_done:
3665 kfree(ctl_offsets);
3666
3667 return rc;
3668}
3669
3670static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
3671{
3672 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3673 u32 count;
3674 u32 *offsets;
3675 int rc;
3676
3677
3678 count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
3679 if (count == 0)
3680 return -EINVAL;
3681
3682 offsets = kcalloc(count, sizeof(u32), GFP_KERNEL);
3683 if (!offsets)
3684 return -ENOMEM;
3685
3686 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
3687 offsets, count);
3688 if (rc)
3689 goto parse_fail;
3690
3691 rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
3692 if (rc)
3693 pr_err("unable to setup video interfaces\n");
3694
3695parse_fail:
3696 kfree(offsets);
3697
3698 return rc;
3699}
3700
3701static int mdss_mdp_update_smp_map(struct platform_device *pdev,
3702 const u32 *data, int len, int pipe_cnt,
3703 struct mdss_mdp_pipe *pipes)
3704{
3705 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3706 int i, j, k;
3707 u32 cnt, mmb;
3708
3709 len /= sizeof(u32);
3710 for (i = 0, k = 0; i < len; k++) {
3711 struct mdss_mdp_pipe *pipe = NULL;
3712
3713 if (k >= pipe_cnt) {
3714 pr_err("invalid fixed mmbs\n");
3715 return -EINVAL;
3716 }
3717
3718 pipe = &pipes[k];
3719
3720 cnt = be32_to_cpu(data[i++]);
3721 if (cnt == 0)
3722 continue;
3723
3724 for (j = 0; j < cnt; j++) {
3725 mmb = be32_to_cpu(data[i++]);
3726 if (mmb > mdata->smp_mb_cnt) {
3727 pr_err("overflow mmb:%d pipe:%d: max:%d\n",
3728 mmb, k, mdata->smp_mb_cnt);
3729 return -EINVAL;
3730 }
3731 set_bit(mmb, pipe->smp_map[0].fixed);
3732 }
3733 if (bitmap_intersects(pipe->smp_map[0].fixed,
3734 mdata->mmb_alloc_map,
3735 mdata->smp_mb_cnt)) {
3736 pr_err("overlapping fixed mmb map\n");
3737 return -EINVAL;
3738 }
3739 bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed,
3740 mdata->mmb_alloc_map, mdata->smp_mb_cnt);
3741 }
3742 return 0;
3743}
3744
3745static int mdss_mdp_parse_dt_smp(struct platform_device *pdev)
3746{
3747 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3748 u32 num;
3749 u32 data[2];
3750 int rc, len;
3751 const u32 *arr;
3752
3753 num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data");
3754 /*
3755 * This property is optional for targets with fix pixel ram. Rest
3756 * must provide no. of smp and size of each block.
3757 */
3758 if (!num)
3759 return 0;
3760 else if (num != 2)
3761 return -EINVAL;
3762
3763 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num);
3764 if (rc)
3765 return rc;
3766
3767 rc = mdss_mdp_smp_setup(mdata, data[0], data[1]);
3768
3769 if (rc) {
3770 pr_err("unable to setup smp data\n");
3771 return rc;
3772 }
3773
3774 rc = of_property_read_u32(pdev->dev.of_node,
3775 "qcom,mdss-smp-mb-per-pipe", data);
3776 mdata->smp_mb_per_pipe = (!rc ? data[0] : 0);
3777
3778 rc = 0;
3779 arr = of_get_property(pdev->dev.of_node,
3780 "qcom,mdss-pipe-rgb-fixed-mmb", &len);
3781 if (arr) {
3782 rc = mdss_mdp_update_smp_map(pdev, arr, len,
3783 mdata->nrgb_pipes, mdata->rgb_pipes);
3784
3785 if (rc)
3786 pr_warn("unable to update smp map for RGB pipes\n");
3787 }
3788
3789 arr = of_get_property(pdev->dev.of_node,
3790 "qcom,mdss-pipe-vig-fixed-mmb", &len);
3791 if (arr) {
3792 rc = mdss_mdp_update_smp_map(pdev, arr, len,
3793 mdata->nvig_pipes, mdata->vig_pipes);
3794
3795 if (rc)
3796 pr_warn("unable to update smp map for VIG pipes\n");
3797 }
3798 return rc;
3799}
3800
3801static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev,
3802 char *prop_name, struct mult_factor *ff)
3803{
3804 int rc;
3805 u32 data[2] = {1, 1};
3806
3807 rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2);
3808 if (rc) {
3809 pr_debug("err reading %s\n", prop_name);
3810 } else {
3811 ff->numer = data[0];
3812 ff->denom = data[1];
3813 }
3814}
3815
3816static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev)
3817{
3818 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3819 struct mdss_prefill_data *prefill = &mdata->prefill_data;
3820 int rc;
3821
3822 rc = of_property_read_u32(pdev->dev.of_node,
3823 "qcom,mdss-prefill-outstanding-buffer-bytes",
3824 &prefill->ot_bytes);
3825 if (rc) {
3826 pr_err("prefill outstanding buffer bytes not specified\n");
3827 return rc;
3828 }
3829
3830 rc = of_property_read_u32(pdev->dev.of_node,
3831 "qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes);
3832 if (rc) {
3833 pr_err("prefill y buffer bytes not specified\n");
3834 return rc;
3835 }
3836
3837 rc = of_property_read_u32(pdev->dev.of_node,
3838 "qcom,mdss-prefill-scaler-buffer-lines-bilinear",
3839 &prefill->y_scaler_lines_bilinear);
3840 if (rc) {
3841 pr_err("prefill scaler lines for bilinear not specified\n");
3842 return rc;
3843 }
3844
3845 rc = of_property_read_u32(pdev->dev.of_node,
3846 "qcom,mdss-prefill-scaler-buffer-lines-caf",
3847 &prefill->y_scaler_lines_caf);
3848 if (rc) {
3849 pr_debug("prefill scaler lines for caf not specified\n");
3850 return rc;
3851 }
3852
3853 rc = of_property_read_u32(pdev->dev.of_node,
3854 "qcom,mdss-prefill-post-scaler-buffer-pixels",
3855 &prefill->post_scaler_pixels);
3856 if (rc) {
3857 pr_err("prefill post scaler buffer pixels not specified\n");
3858 return rc;
3859 }
3860
3861 rc = of_property_read_u32(pdev->dev.of_node,
3862 "qcom,mdss-prefill-pingpong-buffer-pixels",
3863 &prefill->pp_pixels);
3864 if (rc) {
3865 pr_err("prefill pingpong buffer lines not specified\n");
3866 return rc;
3867 }
3868
3869 rc = of_property_read_u32(pdev->dev.of_node,
3870 "qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines);
3871 if (rc)
3872 pr_debug("prefill FBC lines not specified\n");
3873
3874 return 0;
3875}
3876
/*
 * mdss_mdp_parse_vbif_qos() - parse VBIF QoS remapper priority settings
 * for real-time (RT) and non-real-time (NRT) clients.
 *
 * Each setting must contain exactly MDSS_VBIF_QOS_REMAP_ENTRIES values;
 * anything else is treated as "no setting". Both tables are optional.
 *
 * NOTE(review): mdata->npriority_lvl is reused for both the RT and NRT
 * parse, so after this function it reflects the NRT count (or 0) — verify
 * that readers of npriority_lvl expect that. Also note the early returns
 * on RT parse failure skip the NRT parse entirely, and the kcalloc'd
 * tables are not freed on the parse-failure paths — presumably freed (or
 * leaked once at probe) elsewhere; confirm against the caller.
 */
static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	int rc;

	/* real-time client QoS remap table */
	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-vbif-qos-rt-setting");
	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
		mdata->vbif_rt_qos = kcalloc(mdata->npriority_lvl,
				sizeof(u32), GFP_KERNEL);
		if (!mdata->vbif_rt_qos)
			return;

		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-vbif-qos-rt-setting",
				mdata->vbif_rt_qos, mdata->npriority_lvl);
		if (rc) {
			pr_debug("rt setting not found\n");
			return;
		}
	} else {
		mdata->npriority_lvl = 0;
		pr_debug("Invalid or no vbif qos rt setting\n");
		return;
	}

	/* non-real-time client QoS remap table */
	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-vbif-qos-nrt-setting");
	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
		mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
				sizeof(u32), GFP_KERNEL);
		if (!mdata->vbif_nrt_qos)
			return;

		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-vbif-qos-nrt-setting", mdata->vbif_nrt_qos,
				mdata->npriority_lvl);
		if (rc) {
			pr_debug("nrt setting not found\n");
			return;
		}
	} else {
		mdata->npriority_lvl = 0;
		pr_debug("Invalid or no vbif qos nrt seting\n");
	}
}
3923
3924static void mdss_mdp_parse_max_bw_array(const u32 *arr,
3925 struct mdss_max_bw_settings *max_bw_settings, int count)
3926{
3927 int i;
3928
3929 for (i = 0; i < count; i++) {
3930 max_bw_settings->mdss_max_bw_mode = be32_to_cpu(arr[i*2]);
3931 max_bw_settings->mdss_max_bw_val = be32_to_cpu(arr[(i*2)+1]);
3932 max_bw_settings++;
3933 }
3934}
3935
3936static void mdss_mdp_parse_max_bandwidth(struct platform_device *pdev)
3937{
3938 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3939 struct mdss_max_bw_settings *max_bw_settings;
3940 int max_bw_settings_cnt = 0;
3941 const u32 *max_bw;
3942
3943 max_bw = of_get_property(pdev->dev.of_node, "qcom,max-bw-settings",
3944 &max_bw_settings_cnt);
3945
3946 if (!max_bw || !max_bw_settings_cnt) {
3947 pr_debug("MDSS max bandwidth settings not found\n");
3948 return;
3949 }
3950
3951 max_bw_settings_cnt /= 2 * sizeof(u32);
3952
3953 max_bw_settings = devm_kzalloc(&pdev->dev, sizeof(*max_bw_settings)
3954 * max_bw_settings_cnt, GFP_KERNEL);
3955 if (!max_bw_settings)
3956 return;
3957
3958 mdss_mdp_parse_max_bw_array(max_bw, max_bw_settings,
3959 max_bw_settings_cnt);
3960
3961 mdata->max_bw_settings = max_bw_settings;
3962 mdata->max_bw_settings_cnt = max_bw_settings_cnt;
3963}
3964
3965static void mdss_mdp_parse_per_pipe_bandwidth(struct platform_device *pdev)
3966{
3967
3968 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3969 struct mdss_max_bw_settings *max_bw_per_pipe_settings;
3970 int max_bw_settings_cnt = 0;
3971 const u32 *max_bw_settings;
3972 u32 max_bw, min_bw, threshold, i = 0;
3973
3974 max_bw_settings = of_get_property(pdev->dev.of_node,
3975 "qcom,max-bandwidth-per-pipe-kbps",
3976 &max_bw_settings_cnt);
3977
3978 if (!max_bw_settings || !max_bw_settings_cnt) {
3979 pr_debug("MDSS per pipe max bandwidth settings not found\n");
3980 return;
3981 }
3982
3983 /* Support targets where a common per pipe max bw is provided */
3984 if ((max_bw_settings_cnt / sizeof(u32)) == 1) {
3985 mdata->max_bw_per_pipe = be32_to_cpu(max_bw_settings[0]);
3986 mdata->max_per_pipe_bw_settings = NULL;
3987 pr_debug("Common per pipe max bandwidth provided\n");
3988 return;
3989 }
3990
3991 max_bw_settings_cnt /= 2 * sizeof(u32);
3992
3993 max_bw_per_pipe_settings = devm_kzalloc(&pdev->dev,
3994 sizeof(struct mdss_max_bw_settings) * max_bw_settings_cnt,
3995 GFP_KERNEL);
3996 if (!max_bw_per_pipe_settings) {
3997 pr_err("Memory allocation failed for max_bw_settings\n");
3998 return;
3999 }
4000
4001 mdss_mdp_parse_max_bw_array(max_bw_settings, max_bw_per_pipe_settings,
4002 max_bw_settings_cnt);
4003 mdata->max_per_pipe_bw_settings = max_bw_per_pipe_settings;
4004 mdata->mdss_per_pipe_bw_cnt = max_bw_settings_cnt;
4005
4006 /* Calculate min and max allowed per pipe BW */
4007 min_bw = mdata->max_bw_high;
4008 max_bw = 0;
4009
4010 while (i < max_bw_settings_cnt) {
4011 threshold = mdata->max_per_pipe_bw_settings[i].mdss_max_bw_val;
4012 if (threshold > max_bw)
4013 max_bw = threshold;
4014 if (threshold < min_bw)
4015 min_bw = threshold;
4016 ++i;
4017 }
4018 mdata->max_bw_per_pipe = max_bw;
4019 mdata->min_bw_per_pipe = min_bw;
4020}
4021
/*
 * mdss_mdp_parse_dt_misc() - parse the remaining, mostly optional, MDP
 * device tree properties: feature flags, WFD mode, bus fudge factors,
 * bandwidth/clock limits, rotator downscale range and pipe width limits.
 *
 * Most properties fall back to a documented default when absent; only
 * the pingpong-split sub-properties can fail the parse. Returns 0 on
 * success or a negative error code.
 */
static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 data, slave_pingpong_off;
	const char *wfd_data;
	int rc;
	struct property *prop = NULL;

	/* rotator block size; defaults to 128 when unspecified */
	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
		&data);
	mdata->rot_block_size = (!rc ? data : 128);

	/* default VBIF outstanding-transaction limits; 0 = no override */
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-rd-limit", &data);
	mdata->default_ot_rd_limit = (!rc ? data : 0);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-wr-limit", &data);
	mdata->default_ot_wr_limit = (!rc ? data : 0);

	/* boolean hardware capability flags */
	mdata->has_non_scalar_rgb = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-non-scalar-rgb");
	mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-bwc");
	mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-decimation");
	mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-lut-read");
	mdata->needs_hist_vote = !(of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-hist-vote"));
	/* WFD (wireless display) writeback mode; defaults to shared */
	wfd_data = of_get_property(pdev->dev.of_node,
					"qcom,mdss-wfd-mode", NULL);
	if (wfd_data) {
		pr_debug("wfd mode: %s\n", wfd_data);
		if (!strcmp(wfd_data, "intf")) {
			mdata->wfd_mode = MDSS_MDP_WFD_INTERFACE;
		} else if (!strcmp(wfd_data, "shared")) {
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		} else if (!strcmp(wfd_data, "dedicated")) {
			mdata->wfd_mode = MDSS_MDP_WFD_DEDICATED;
		} else {
			pr_debug("wfd default mode: Shared\n");
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		}
	} else {
		pr_warn("wfd mode not configured. Set to default: Shared\n");
		mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
	}

	mdata->has_src_split = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-source-split");
	mdata->has_fixed_qos_arbiter_enabled =
			of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-fixed-qos-arbiter-enabled");
	mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-idle-power-collapse-enabled");

	/* battery FET regulator is only needed if the supply is described */
	prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
	mdata->batfet_required = prop ? true : false;
	mdata->en_svs_high = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-en-svs-high");
	if (!mdata->en_svs_high)
		pr_debug("%s: svs_high is not enabled\n", __func__);
	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
	if (rc)
		pr_debug("Could not read optional property: highest bank bit\n");

	/* pingpong split needs the slave pingpong and ppb offsets */
	mdata->has_pingpong_split = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-pingpong-split");

	if (mdata->has_pingpong_split) {
		rc = of_property_read_u32(pdev->dev.of_node,
				"qcom,mdss-slave-pingpong-off",
				&slave_pingpong_off);
		if (rc) {
			pr_err("Error in device tree: slave pingpong offset\n");
			return rc;
		}
		mdata->slave_pingpong_base = mdata->mdss_io.base +
			slave_pingpong_off;
		rc = mdss_mdp_parse_dt_ppb_off(pdev);
		if (rc) {
			pr_err("Error in device tree: ppb offset not configured\n");
			return rc;
		}
	}

	/*
	 * 2x factor on AB because bus driver will divide by 2
	 * due to 2x ports to BIMC
	 */
	mdata->ab_factor.numer = 2;
	mdata->ab_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor",
		&mdata->ab_factor);

	/*
	 * 1.2 factor on ib as default value. This value is
	 * experimentally determined and should be tuned in device
	 * tree.
	 */
	mdata->ib_factor.numer = 6;
	mdata->ib_factor.denom = 5;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor",
		&mdata->ib_factor);

	/*
	 * Set overlap ib value equal to ib by default. This value can
	 * be tuned in device tree to be different from ib.
	 * This factor apply when the max bandwidth per pipe
	 * is the overlap BW.
	 */
	mdata->ib_factor_overlap.numer = mdata->ib_factor.numer;
	mdata->ib_factor_overlap.denom = mdata->ib_factor.denom;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap",
		&mdata->ib_factor_overlap);

	mdata->clk_factor.numer = 1;
	mdata->clk_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
		&mdata->clk_factor);

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
	if (rc)
		pr_debug("max bandwidth (low) property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-high-kbps", &mdata->max_bw_high);
	if (rc)
		pr_debug("max bandwidth (high) property not specified\n");

	mdss_mdp_parse_per_pipe_bandwidth(pdev);

	mdss_mdp_parse_max_bandwidth(pdev);

	/* optional MDP clock level table */
	mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev,
					"qcom,mdss-clk-levels");

	if (mdata->nclk_lvl) {
		mdata->clock_levels = kcalloc(mdata->nclk_lvl, sizeof(u32),
							GFP_KERNEL);
		if (!mdata->clock_levels)
			return -ENOMEM;

		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels",
			mdata->clock_levels, mdata->nclk_lvl);
		if (rc)
			pr_debug("clock levels not found\n");
	}

	mdss_mdp_parse_vbif_qos(pdev);
	mdata->traffic_shaper_en = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-traffic-shaper-enabled");
	mdata->has_rot_dwnscale = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-rotator-downscale");
	if (mdata->has_rot_dwnscale) {
		rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-rot-downscale-min",
			&mdata->rot_dwnscale_min);
		if (rc)
			pr_err("Min rotator downscale property not specified\n");

		rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-rot-downscale-max",
			&mdata->rot_dwnscale_max);
		if (rc)
			pr_err("Max rotator downscale property not specified\n");
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-dram-channels", &mdata->bus_channels);
	if (rc)
		pr_debug("number of channels property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,max-pipe-width", &mdata->max_pipe_width);
	if (rc) {
		pr_debug("max pipe width not specified. Using default value\n");
		mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH;
	}
	return 0;
}
4206
4207static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev)
4208{
4209 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4210 u32 *ad_offsets = NULL;
4211 int rc;
4212
4213 mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off");
4214
4215 if (mdata->nad_cfgs == 0) {
4216 mdata->ad_cfgs = NULL;
4217 return 0;
4218 }
4219
4220 if (mdata->nad_cfgs > mdata->nmixers_intf)
4221 return -EINVAL;
4222
4223
4224 mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node,
4225 "qcom,mdss-has-wb-ad");
4226
4227 ad_offsets = kcalloc(mdata->nad_cfgs, sizeof(u32), GFP_KERNEL);
4228 if (!ad_offsets)
4229 return -ENOMEM;
4230
4231 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets,
4232 mdata->nad_cfgs);
4233 if (rc)
4234 goto parse_done;
4235
4236 rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets);
4237 if (rc)
4238 pr_err("unable to setup assertive display\n");
4239
4240parse_done:
4241 kfree(ad_offsets);
4242 return rc;
4243}
4244
4245static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev)
4246{
4247 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4248 u32 len, index;
4249 const u32 *arr;
4250
4251 arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-ctl-off", &len);
4252 if (arr) {
4253 mdata->nppb_ctl = len / sizeof(u32);
4254 mdata->ppb_ctl = devm_kzalloc(&mdata->pdev->dev,
4255 sizeof(u32) * mdata->nppb_ctl, GFP_KERNEL);
4256
4257 if (mdata->ppb_ctl == NULL)
4258 return -ENOMEM;
4259
4260 for (index = 0; index < mdata->nppb_ctl; index++)
4261 mdata->ppb_ctl[index] = be32_to_cpu(arr[index]);
4262 }
4263
4264 arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-cfg-off", &len);
4265 if (arr) {
4266 mdata->nppb_cfg = len / sizeof(u32);
4267 mdata->ppb_cfg = devm_kzalloc(&mdata->pdev->dev,
4268 sizeof(u32) * mdata->nppb_cfg, GFP_KERNEL);
4269
4270 if (mdata->ppb_cfg == NULL)
4271 return -ENOMEM;
4272
4273 for (index = 0; index < mdata->nppb_cfg; index++)
4274 mdata->ppb_cfg[index] = be32_to_cpu(arr[index]);
4275 }
4276 return 0;
4277}
4278
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05304279#ifdef CONFIG_QCOM_BUS_SCALING
Sachin Bhayareeeb88892018-01-02 16:36:01 +05304280static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
4281{
4282 int rc, paths;
4283 struct device_node *node;
4284 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4285
4286 rc = of_property_read_u32(pdev->dev.of_node,
4287 "qcom,msm-bus,num-paths", &paths);
4288 if (rc) {
4289 pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n",
4290 rc);
4291 return rc;
4292 }
4293 mdss_res->axi_port_cnt = paths;
4294
4295 rc = of_property_read_u32(pdev->dev.of_node,
4296 "qcom,mdss-num-nrt-paths", &mdata->nrt_axi_port_cnt);
4297 if (rc && mdata->has_fixed_qos_arbiter_enabled) {
4298 pr_err("Error. qcom,mdss-num-nrt-paths prop not found.rc=%d\n",
4299 rc);
4300 return rc;
4301 }
4302 rc = 0;
4303
4304 mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
4305 if (IS_ERR_OR_NULL(mdata->bus_scale_table)) {
4306 rc = PTR_ERR(mdata->bus_scale_table);
4307 if (!rc)
4308 rc = -EINVAL;
4309 pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc);
4310 mdata->bus_scale_table = NULL;
4311 return rc;
4312 }
4313
4314 /*
4315 * if mdss-reg-bus is not found then default table is picked
4316 * hence below code wont return error.
4317 */
4318 node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-reg-bus");
4319 if (node) {
4320 mdata->reg_bus_scale_table =
4321 msm_bus_pdata_from_node(pdev, node);
4322 if (IS_ERR_OR_NULL(mdata->reg_bus_scale_table)) {
4323 rc = PTR_ERR(mdata->reg_bus_scale_table);
4324 if (!rc)
4325 pr_err("bus_pdata reg_bus failed rc=%d\n", rc);
4326 rc = 0;
4327 mdata->reg_bus_scale_table = NULL;
4328 }
4329 } else {
4330 rc = 0;
4331 mdata->reg_bus_scale_table = NULL;
4332 pr_debug("mdss-reg-bus not found\n");
4333 }
4334
4335 node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-hw-rt-bus");
4336 if (node) {
4337 mdata->hw_rt_bus_scale_table =
4338 msm_bus_pdata_from_node(pdev, node);
4339 if (IS_ERR_OR_NULL(mdata->hw_rt_bus_scale_table)) {
4340 rc = PTR_ERR(mdata->hw_rt_bus_scale_table);
4341 if (!rc)
4342 pr_err("hw_rt_bus_scale failed rc=%d\n", rc);
4343 rc = 0;
4344 mdata->hw_rt_bus_scale_table = NULL;
4345 }
4346 } else {
4347 rc = 0;
4348 mdata->hw_rt_bus_scale_table = NULL;
4349 pr_debug("mdss-hw-rt-bus not found\n");
4350 }
4351
4352 return rc;
4353}
4354#else
/*
 * Stub used when the kernel is built without CONFIG_QCOM_BUS_SCALING:
 * bus-scaling votes are no-ops, so there is nothing to parse from DT.
 */
__maybe_unused
static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
{
	return 0;
}
4360
4361#endif
4362
4363static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
4364 char *prop_name, u32 *offsets, int len)
4365{
4366 int rc;
4367
4368 rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
4369 offsets, len);
4370 if (rc) {
4371 pr_err("Error from prop %s : u32 array read\n", prop_name);
4372 return -EINVAL;
4373 }
4374
4375 return 0;
4376}
4377
4378static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
4379 char *prop_name)
4380{
4381 int len = 0;
4382
4383 of_find_property(pdev->dev.of_node, prop_name, &len);
4384
4385 if (len < 1) {
4386 pr_debug("prop %s : doesn't exist in device tree\n",
4387 prop_name);
4388 return 0;
4389 }
4390
4391 len = len/sizeof(u32);
4392
4393 return len;
4394}
4395
/* Return the global MDP driver data (NULL until the driver has probed). */
struct mdss_data_type *mdss_mdp_get_mdata(void)
{
	return mdss_res;
}
4400
4401void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable)
4402{
4403 int ret;
4404
4405 if (!mdata->batfet_required)
4406 return;
4407
4408 if (!mdata->batfet) {
4409 if (enable) {
4410 mdata->batfet = devm_regulator_get(&mdata->pdev->dev,
4411 "batfet");
4412 if (IS_ERR_OR_NULL(mdata->batfet)) {
4413 pr_debug("unable to get batfet reg. rc=%d\n",
4414 PTR_RET(mdata->batfet));
4415 mdata->batfet = NULL;
4416 return;
4417 }
4418 } else {
4419 pr_debug("Batfet regulator disable w/o enable\n");
4420 return;
4421 }
4422 }
4423
4424 if (enable) {
4425 ret = regulator_enable(mdata->batfet);
4426 if (ret)
4427 pr_err("regulator_enable failed\n");
4428 } else {
4429 regulator_disable(mdata->batfet);
4430 }
4431}
4432
4433/**
4434 * mdss_is_ready() - checks if mdss is probed and ready
4435 *
4436 * Checks if mdss resources have been initialized
4437 *
4438 * returns true if mdss is ready, else returns false
4439 */
4440bool mdss_is_ready(void)
4441{
4442 return mdss_mdp_get_mdata() ? true : false;
4443}
4444EXPORT_SYMBOL(mdss_mdp_get_mdata);
4445
4446/**
4447 * mdss_panel_intf_type() - checks if a given intf type is primary
4448 * @intf_val: panel interface type of the individual controller
4449 *
4450 * Individual controller queries with MDP to check if it is
4451 * configured as the primary interface.
4452 *
4453 * returns a pointer to the configured structure mdss_panel_cfg
4454 * to the controller that's configured as the primary panel interface.
4455 * returns NULL on error or if @intf_val is not the configured
4456 * controller.
4457 */
4458struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val)
4459{
4460 if (!mdss_res || !mdss_res->pan_cfg.init_done)
4461 return ERR_PTR(-EPROBE_DEFER);
4462
4463 if (mdss_res->pan_cfg.pan_intf == intf_val)
4464 return &mdss_res->pan_cfg;
4465 else
4466 return NULL;
4467}
4468EXPORT_SYMBOL(mdss_panel_intf_type);
4469
4470struct irq_info *mdss_intr_line()
4471{
4472 return mdss_mdp_hw.irq_info;
4473}
4474EXPORT_SYMBOL(mdss_intr_line);
4475
4476int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt)
4477{
4478 void __iomem *vbif_base;
4479 u32 status;
4480 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4481 u32 idle_mask = BIT(xin_id);
4482 int rc;
4483
4484 vbif_base = is_vbif_nrt ? mdata->vbif_nrt_io.base :
4485 mdata->vbif_io.base;
4486
4487 rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
4488 status, (status & idle_mask),
4489 1000, XIN_HALT_TIMEOUT_US);
4490 if (rc == -ETIMEDOUT) {
4491 pr_err("VBIF client %d not halting. TIMEDOUT.\n",
4492 xin_id);
4493 MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
4494 "dbg_bus", "vbif_dbg_bus", "panic");
4495 } else {
4496 pr_debug("VBIF client %d is halted\n", xin_id);
4497 }
4498
4499 return rc;
4500}
4501
4502/**
4503 * force_on_xin_clk() - enable/disable the force-on for the pipe clock
4504 * @bit_off: offset of the bit to enable/disable the force-on.
4505 * @reg_off: register offset for the clock control.
4506 * @enable: boolean to indicate if the force-on of the clock needs to be
4507 * enabled or disabled.
4508 *
4509 * This function returns:
4510 * true - if the clock is forced-on by this function
4511 * false - if the clock was already forced on
4512 * It is the caller responsibility to check if this function is forcing
4513 * the clock on; if so, it will need to remove the force of the clock,
4514 * otherwise it should avoid to remove the force-on.
4515 * Clocks must be on when calling this function.
4516 */
4517bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
4518{
4519 u32 val;
4520 u32 force_on_mask;
4521 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4522 bool clk_forced_on = false;
4523
4524 force_on_mask = BIT(bit_off);
4525 val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
4526
4527 clk_forced_on = !(force_on_mask & val);
4528
4529 if (true == enable)
4530 val |= force_on_mask;
4531 else
4532 val &= ~force_on_mask;
4533
4534 writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
4535
4536 return clk_forced_on;
4537}
4538
/*
 * Tighten the VBIF OT (outstanding transaction) limit for rotator/WFD
 * use cases based on MDP revision, resolution and frame rate. Leaves
 * *ot_lim untouched when dynamic OT limiting does not apply.
 */
static void apply_dynamic_ot_limit(u32 *ot_lim,
	struct mdss_mdp_set_ot_params *params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 res, read_vbif_ot;
	u32 rot_ot = 4;

	/* Bail out unless this target opted into dynamic OT limiting. */
	if (false == test_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map))
		return;

	/* Dynamic OT setting done only for rotator and WFD */
	if (!((params->is_rot && params->is_yuv) || params->is_wb))
		return;

	res = params->width * params->height;

	pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d fps:%d\n",
		params->width, params->height, params->is_rot,
		params->is_yuv, params->is_wb, res, params->frame_rate);

	switch (mdata->mdp_rev) {
	case MDSS_MDP_HW_REV_114:
		/*
		 * MDP rev is same for msm8937 and msm8940, but rotator OT
		 * recommendations are different. Setting it based on AXI OT.
		 */
		read_vbif_ot = MDSS_VBIF_READ(mdata, MMSS_VBIF_OUT_RD_LIM_CONF0,
			false);
		rot_ot = (read_vbif_ot == 0x10) ? 4 : 8;
		/* fall-through: REV_114 shares the limits below */
	case MDSS_MDP_HW_REV_115:
	case MDSS_MDP_HW_REV_116:
		if ((res <= RES_1080p) && (params->frame_rate <= 30))
			*ot_lim = 2;
		else if (params->is_rot && params->is_yuv)
			*ot_lim = rot_ot;
		else
			*ot_lim = 6;
		break;
	default:
		/* Other revisions: limit only up to UHD resolution. */
		if (res <= RES_1080p) {
			*ot_lim = 2;
		} else if (res <= RES_UHD) {
			if (params->is_rot && params->is_yuv)
				*ot_lim = 8;
			else
				*ot_lim = 16;
		}
		break;
	}
}
4590
4591static u32 get_ot_limit(u32 reg_off, u32 bit_off,
4592 struct mdss_mdp_set_ot_params *params)
4593{
4594 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4595 u32 ot_lim = 0;
4596 u32 is_vbif_nrt, val;
4597
4598 if (mdata->default_ot_wr_limit &&
4599 (params->reg_off_vbif_lim_conf == MMSS_VBIF_WR_LIM_CONF))
4600 ot_lim = mdata->default_ot_wr_limit;
4601 else if (mdata->default_ot_rd_limit &&
4602 (params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF))
4603 ot_lim = mdata->default_ot_rd_limit;
4604
4605 /*
4606 * If default ot is not set from dt,
4607 * then do not configure it.
4608 */
4609 if (ot_lim == 0)
4610 goto exit;
4611
4612 /* Modify the limits if the target and the use case requires it */
4613 apply_dynamic_ot_limit(&ot_lim, params);
4614
4615 is_vbif_nrt = params->is_vbif_nrt;
4616 val = MDSS_VBIF_READ(mdata, reg_off, is_vbif_nrt);
4617 val &= (0xFF << bit_off);
4618 val = val >> bit_off;
4619
4620 if (val == ot_lim)
4621 ot_lim = 0;
4622
4623exit:
4624 pr_debug("ot_lim=%d\n", ot_lim);
4625 return ot_lim;
4626}
4627
/*
 * Program the VBIF OT limit for one xin client. Sequence: force the
 * client clock on, write the new limit, halt the client and wait for the
 * halt ack (so the limit takes effect), then unhalt and restore the
 * clock force-on state. No-op when get_ot_limit() reports no change.
 */
void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 ot_lim;
	/* Four 8-bit OT fields are packed per 32-bit LIM_CONF register. */
	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
		params->reg_off_vbif_lim_conf;
	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
	bool is_vbif_nrt = params->is_vbif_nrt;
	u32 reg_val;
	bool forced_on;

	ot_lim = get_ot_limit(
		reg_off_vbif_lim_conf,
		bit_off_vbif_lim_conf,
		params) & 0xFF;

	if (ot_lim == 0)
		goto exit;

	trace_mdp_perf_set_ot(params->num, params->xin_id, ot_lim,
		is_vbif_nrt);

	mutex_lock(&mdata->reg_lock);

	/* Clock must be running for the register writes and the halt. */
	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
		params->reg_off_mdp_clk_ctrl, true);

	reg_val = MDSS_VBIF_READ(mdata, reg_off_vbif_lim_conf,
		is_vbif_nrt);
	reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
	reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
	MDSS_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val,
		is_vbif_nrt);

	/* Halt the client so the new OT limit is latched. */
	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		is_vbif_nrt);
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val | BIT(params->xin_id), is_vbif_nrt);

	/* Drop the lock while polling; the wait can take a while. */
	mutex_unlock(&mdata->reg_lock);
	mdss_mdp_wait_for_xin_halt(params->xin_id, is_vbif_nrt);
	mutex_lock(&mdata->reg_lock);

	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		is_vbif_nrt);
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val & ~BIT(params->xin_id), is_vbif_nrt);

	/* Only undo the force-on if this call was the one that set it. */
	if (forced_on)
		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
			params->reg_off_mdp_clk_ctrl, false);

	mutex_unlock(&mdata->reg_lock);

exit:
	return;
}
4685
4686#define RPM_MISC_REQ_TYPE 0x6373696d
4687#define RPM_MISC_REQ_SVS_PLUS_KEY 0x2B737673
4688
/*
 * Vote (or un-vote) the RPM "SVS high" misc request in both the active
 * and sleep sets. Only targets with en_svs_high participate. The kvp and
 * the enable byte are static because the RPM message references their
 * storage; they persist across calls.
 */
static void mdss_mdp_config_cx_voltage(struct mdss_data_type *mdata, int enable)
{
	int ret = 0;
	static struct msm_rpm_kvp rpm_kvp;
	static uint8_t svs_en;

	if (!mdata->en_svs_high)
		return;

	/* One-time lazy init of the request key/length. */
	if (!rpm_kvp.key) {
		rpm_kvp.key = RPM_MISC_REQ_SVS_PLUS_KEY;
		rpm_kvp.length = sizeof(uint64_t);
		pr_debug("%s: Initialized rpm_kvp structure\n", __func__);
	}

	if (enable) {
		svs_en = 1;
		rpm_kvp.data = &svs_en;
		pr_debug("%s: voting for svs high\n", __func__);
		/* Vote in both RPM sets; failures are logged, not fatal. */
		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("vote for active_set svs high failed: %d\n",
					ret);
		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("vote for sleep_set svs high failed: %d\n",
					ret);
	} else {
		svs_en = 0;
		rpm_kvp.data = &svs_en;
		pr_debug("%s: Removing vote for svs high\n", __func__);
		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("Remove vote:active_set svs high failed: %d\n",
					ret);
		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("Remove vote:sleep_set svs high failed: %d\n",
					ret);
	}
}
4738
4739static int mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable)
4740{
4741 int rc = 0;
4742
4743 if (!mdata->vdd_cx)
4744 return rc;
4745
4746 if (enable) {
4747 rc = regulator_set_voltage(
4748 mdata->vdd_cx,
4749 RPM_REGULATOR_CORNER_SVS_SOC,
4750 RPM_REGULATOR_CORNER_SUPER_TURBO);
4751 if (rc < 0)
4752 goto vreg_set_voltage_fail;
4753
4754 pr_debug("Enabling CX power rail\n");
4755 rc = regulator_enable(mdata->vdd_cx);
4756 if (rc) {
4757 pr_err("Failed to enable regulator.\n");
4758 return rc;
4759 }
4760 } else {
4761 pr_debug("Disabling CX power rail\n");
4762 rc = regulator_disable(mdata->vdd_cx);
4763 if (rc) {
4764 pr_err("Failed to disable regulator.\n");
4765 return rc;
4766 }
4767 rc = regulator_set_voltage(
4768 mdata->vdd_cx,
4769 RPM_REGULATOR_CORNER_NONE,
4770 RPM_REGULATOR_CORNER_SUPER_TURBO);
4771 if (rc < 0)
4772 goto vreg_set_voltage_fail;
4773 }
4774
4775 return rc;
4776
4777vreg_set_voltage_fail:
4778 pr_err("Set vltg fail\n");
4779 return rc;
4780}
4781
4782/**
4783 * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
4784 * @mdata: MDP private data
4785 * @on: 1 to turn on footswitch, 0 to turn off footswitch
4786 *
4787 * When no active references to the MDP device node and it's child nodes are
 * held, MDSS GDSC can be turned off. However, if any panels are still
4789 * active (but likely in an idle state), the vote for the CX and the batfet
4790 * rails should not be released.
4791 */
static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
{
	int ret;
	int active_cnt = 0;

	if (!mdata->fs)
		return;

	MDSS_XLOG(on, mdata->fs_ena, mdata->idle_pc, mdata->en_svs_high,
		atomic_read(&mdata->active_intf_cnt));

	if (on) {
		if (!mdata->fs_ena) {
			pr_debug("Enable MDP FS\n");
			/* Venus (if present) must come up before the GDSC. */
			if (mdata->venus) {
				ret = regulator_enable(mdata->venus);
				if (ret)
					pr_err("venus failed to enable\n");
			}

			ret = regulator_enable(mdata->fs);
			if (ret)
				pr_warn("Footswitch failed to enable\n");
			/*
			 * CX/batfet votes were kept across an idle power
			 * collapse, so only re-vote on a full power-up.
			 */
			if (!mdata->idle_pc) {
				mdss_mdp_cx_ctrl(mdata, true);
				mdss_mdp_batfet_ctrl(mdata, true);
			}
		}
		if (mdata->en_svs_high)
			mdss_mdp_config_cx_voltage(mdata, true);
		mdata->fs_ena = true;
	} else {
		if (mdata->fs_ena) {
			pr_debug("Disable MDP FS\n");
			active_cnt = atomic_read(&mdata->active_intf_cnt);
			if (active_cnt != 0) {
				/*
				 * Turning off GDSC while overlays are still
				 * active.
				 */
				mdata->idle_pc = true;
				pr_debug("idle pc. active overlays=%d\n",
					active_cnt);
				mdss_mdp_memory_retention_enter();
			} else {
				/* Full power-down: drop CX/batfet votes. */
				mdss_mdp_cx_ctrl(mdata, false);
				mdss_mdp_batfet_ctrl(mdata, false);
			}
			if (mdata->en_svs_high)
				mdss_mdp_config_cx_voltage(mdata, false);
			regulator_disable(mdata->fs);
			if (mdata->venus)
				regulator_disable(mdata->venus);
		}
		mdata->fs_ena = false;
	}
}
4849
/*
 * Enable/disable secure display via the TZ MEM_PROTECT_SD_CTRL service.
 * The SCM call is only issued on the first enable / last disable; other
 * transitions just update the client refcount. Returns 0 (or the TZ
 * response) on success, a negative errno on SCM failure.
 */
int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata,
	unsigned int enable)
{
	struct sd_ctrl_req {
		unsigned int enable;
	} __attribute__ ((__packed__)) request;
	unsigned int resp = -1;
	int ret = 0;
	struct scm_desc desc;

	/*
	 * Skip the SCM call when other secure-display clients are already
	 * active (enable) or will remain active (disable).
	 */
	if ((enable && (mdss_get_sd_client_cnt() > 0)) ||
		(!enable && (mdss_get_sd_client_cnt() > 1))) {
		mdss_update_sd_client(mdata, enable);
		return ret;
	}

	desc.args[0] = request.enable = enable;
	desc.arginfo = SCM_ARGS(1);

	/* Legacy and ARMv8 SCM conventions need different call paths. */
	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
			&request, sizeof(request), &resp, sizeof(resp));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
			mem_protect_sd_ctrl_id), &desc);
		resp = desc.ret[0];
	}

	pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
				enable, ret, resp);
	if (ret)
		return ret;

	mdss_update_sd_client(mdata, enable);
	return resp;
}
4886
/*
 * Common suspend path: remember whether the footswitch was on so resume
 * can restore the same state, then turn it off.
 */
static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
{
	mdata->suspend_fs_ena = mdata->fs_ena;
	mdss_mdp_footswitch_ctrl(mdata, false);

	pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);

	return 0;
}
4896
/*
 * Common resume path: re-enable the footswitch only if it was on when
 * the device was suspended.
 */
static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
{
	if (mdata->suspend_fs_ena)
		mdss_mdp_footswitch_ctrl(mdata, true);

	pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);

	return 0;
}
4906
4907#ifdef CONFIG_PM_SLEEP
4908static int mdss_mdp_pm_suspend(struct device *dev)
4909{
4910 struct mdss_data_type *mdata;
4911
4912 mdata = dev_get_drvdata(dev);
4913 if (!mdata)
4914 return -ENODEV;
4915
4916 dev_dbg(dev, "display pm suspend\n");
4917
4918 return mdss_mdp_suspend_sub(mdata);
4919}
4920
4921static int mdss_mdp_pm_resume(struct device *dev)
4922{
4923 struct mdss_data_type *mdata;
4924
4925 mdata = dev_get_drvdata(dev);
4926 if (!mdata)
4927 return -ENODEV;
4928
4929 dev_dbg(dev, "display pm resume\n");
4930
4931 /*
4932 * It is possible that the runtime status of the mdp device may
4933 * have been active when the system was suspended. Reset the runtime
4934 * status to suspended state after a complete system resume.
4935 */
4936 pm_runtime_disable(dev);
4937 pm_runtime_set_suspended(dev);
4938 pm_runtime_enable(dev);
4939
4940 return mdss_mdp_resume_sub(mdata);
4941}
4942#endif
4943
4944#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
/* Legacy platform-bus suspend hook (used only when !CONFIG_PM_SLEEP). */
static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	if (!mdata)
		return -ENODEV;

	dev_dbg(&pdev->dev, "display suspend\n");

	return mdss_mdp_suspend_sub(mdata);
}
4956
/* Legacy platform-bus resume hook (used only when !CONFIG_PM_SLEEP). */
static int mdss_mdp_resume(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	if (!mdata)
		return -ENODEV;

	dev_dbg(&pdev->dev, "display resume\n");

	return mdss_mdp_resume_sub(mdata);
}
4968#else
4969#define mdss_mdp_suspend NULL
4970#define mdss_mdp_resume NULL
4971#endif
4972
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05304973#ifdef CONFIG_PM
/* Runtime-PM resume: power the footswitch and, unless waking from idle
 * power collapse, resume the child framebuffer/panel devices first.
 */
static int mdss_mdp_runtime_resume(struct device *dev)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	bool device_on = true;

	if (!mdata)
		return -ENODEV;

	dev_dbg(dev, "pm_runtime: resuming. active overlay cnt=%d\n",
		atomic_read(&mdata->active_intf_cnt));

	/* do not resume panels when coming out of idle power collapse */
	if (!mdata->idle_pc)
		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
	mdss_mdp_footswitch_ctrl(mdata, true);

	return 0;
}
4992
/* Runtime-PM idle callback: nothing to veto, just trace the transition. */
static int mdss_mdp_runtime_idle(struct device *dev)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);

	if (!mdata)
		return -ENODEV;

	dev_dbg(dev, "pm_runtime: idling...\n");

	return 0;
}
5004
/* Runtime-PM suspend: refuse while MDP clocks are on, otherwise drop the
 * footswitch and (unless entering idle power collapse) suspend children.
 */
static int mdss_mdp_runtime_suspend(struct device *dev)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	bool device_on = false;

	if (!mdata)
		return -ENODEV;
	dev_dbg(dev, "pm_runtime: suspending. active overlay cnt=%d\n",
		atomic_read(&mdata->active_intf_cnt));

	/* Clocks still enabled means someone is using the hardware. */
	if (mdata->clk_ena) {
		pr_err("MDP suspend failed\n");
		return -EBUSY;
	}

	mdss_mdp_footswitch_ctrl(mdata, false);
	/* do not suspend panels when going in to idle power collapse */
	if (!mdata->idle_pc)
		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);

	return 0;
}
5027#endif
5028
/* System-sleep ops are always wired; runtime ops only with CONFIG_PM. */
static const struct dev_pm_ops mdss_mdp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
#ifdef CONFIG_PM
	SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend,
			mdss_mdp_runtime_resume,
			mdss_mdp_runtime_idle)
#endif
};
5037
/*
 * Driver remove: tear down runtime PM, post-processing, bus-scale
 * registration, debugfs and the GDSC regulator notifier, in that order.
 */
static int mdss_mdp_remove(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	if (!mdata)
		return -ENODEV;
	pm_runtime_disable(&pdev->dev);
	mdss_mdp_pp_term(&pdev->dev);
	mdss_mdp_bus_scale_unregister(mdata);
	mdss_debugfs_remove(mdata);
	if (mdata->regulator_notif_register)
		regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb));
	return 0;
}
5052
/* Device-tree match table for the MDP core node. */
static const struct of_device_id mdss_mdp_dt_match[] = {
	{ .compatible = "qcom,mdss_mdp",},
	{}
};
MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match);
5058
/* Platform driver; legacy suspend/resume are NULL under CONFIG_PM_SLEEP
 * (handled through mdss_mdp_pm_ops instead).
 */
static struct platform_driver mdss_mdp_driver = {
	.probe = mdss_mdp_probe,
	.remove = mdss_mdp_remove,
	.suspend = mdss_mdp_suspend,
	.resume = mdss_mdp_resume,
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.of_match_table = mdss_mdp_dt_match,
		.pm = &mdss_mdp_pm_ops,
	},
};
5075
/* Thin wrapper so module init reads as a single registration step. */
static int mdss_mdp_register_driver(void)
{
	return platform_driver_register(&mdss_mdp_driver);
}
5080
5081static int __init mdss_mdp_driver_init(void)
5082{
5083 int ret;
5084
5085 ret = mdss_mdp_register_driver();
5086 if (ret) {
5087 pr_err("mdp_register_driver() failed!\n");
5088 return ret;
5089 }
5090
5091 return 0;
5092
5093}
5094
/* Bootloader-supplied panel selection string, writable by root only. */
module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0600);
/*
 * panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>
 * where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb
 * config; <pan_intf> is dsi:<ctrl_id> or hdmi or edp
 * <pan_intf_cfg> is panel interface specific string
 * Ex: This string is panel's device node name from DT
 * for DSI interface
 * hdmi/edp interface does not use this string
 * <panel_topology_cfg> is an optional string. Currently it is
 * only valid for DSI panels. In dual-DSI case, it needs to be
 * used on both panels or none. When used, format is config%d
 * where %d is one of the configuration found in device node of
 * panel selected by <pan_intf_cfg>
 */
/*
 * NOTE(review): "panel" has two MODULE_PARM_DESC entries below; modinfo
 * will report both — consider merging them into one description.
 */
MODULE_PARM_DESC(panel, "lk supplied panel selection string");
MODULE_PARM_DESC(panel,
	"panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>");
module_init(mdss_mdp_driver_init);