/*
2 * MDSS MDP Interface (used by framebuffer core)
3 *
4 * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
5 * Copyright (C) 2007 Google Incorporated
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#define pr_fmt(fmt) "%s: " fmt, __func__
18
19#include <linux/clk.h>
20#include <linux/debugfs.h>
21#include <linux/delay.h>
22#include <linux/hrtimer.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/iommu.h>
28#include <linux/iopoll.h>
29#include <linux/of.h>
30#include <linux/of_address.h>
31#include <linux/pm.h>
32#include <linux/pm_runtime.h>
33#include <linux/regulator/consumer.h>
34#include <linux/regulator/rpm-smd-regulator.h>
35#include <linux/module.h>
36#include <linux/mutex.h>
37#include <linux/sched.h>
38#include <linux/time.h>
39#include <linux/spinlock.h>
40#include <linux/semaphore.h>
41#include <linux/uaccess.h>
42#include <linux/clk/msm-clk.h>
43#include <linux/irqdomain.h>
44#include <linux/irq.h>
45
46#include <linux/msm-bus.h>
47#include <linux/msm-bus-board.h>
48#include <soc/qcom/scm.h>
49#include <soc/qcom/rpm-smd.h>
50
51#include "mdss.h"
52#include "mdss_fb.h"
53#include "mdss_mdp.h"
54#include "mdss_panel.h"
55#include "mdss_debug.h"
56#include "mdss_mdp_debug.h"
57#include "mdss_smmu.h"
58
59#include "mdss_mdp_trace.h"
60
/* timeout for the AXI bus halt handshake, in microseconds */
#define AXI_HALT_TIMEOUT_US 0x4000
/* runtime-PM autosuspend delay for the MDSS device */
#define AUTOSUSPEND_TIMEOUT_MS 200
/* default maximum source width a single MDP pipe can fetch */
#define DEFAULT_MDP_PIPE_WIDTH 2048
/* pixel-count thresholds for resolution-based tuning */
#define RES_1080p (1088*1920)
#define RES_UHD (3840*2160)

/* global MDSS driver state; set up once at probe time */
struct mdss_data_type *mdss_res;
/* SCM command id used for secure-display memory protection
 * (presumably one of MEM_PROTECT_SD_CTRL{,_FLAT}; selected elsewhere)
 */
static u32 mem_protect_sd_ctrl_id;

/* Return the unsecure MDSS SMMU domain id used for framebuffer memory. */
static int mdss_fb_mem_get_iommu_domain(void)
{
	return mdss_smmu_get_domain_id(MDSS_IOMMU_DOMAIN_UNSECURE);
}
74
/*
 * MDP5 entry points exported to the framebuffer core (mdss_fb).
 * The fb layer calls back into the MDP driver through this table.
 */
struct msm_mdp_interface mdp5 = {
	.init_fnc = mdss_mdp_overlay_init,
	.fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
	.fb_stride = mdss_mdp_fb_stride,
	.check_dsi_status = mdss_check_dsi_ctrl_status,
	.get_format_params = mdss_mdp_get_format_params,
};
82
/* initial worst-case bandwidth votes (bytes/sec) used until real votes land */
#define IB_QUOTA 2000000000
#define AB_QUOTA 2000000000

/* maximum number of AXI ports the driver can distribute votes across */
#define MAX_AXI_PORT_COUNT 3

/* SCM command ids for secure-display memory protection calls */
#define MEM_PROTECT_SD_CTRL 0xF
#define MEM_PROTECT_SD_CTRL_FLAT 0x14
90
/* protects the MDP irq mask state and HW interrupt dispatch */
static DEFINE_SPINLOCK(mdp_lock);
/* protects the mdp_intr_cb callback table */
static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
/* NOTE(review): the three mutexes below are not used in this part of the
 * file; names suggest clock, iommu-refcount and footswitch idle-pc
 * serialization respectively — confirm against their users.
 */
static DEFINE_MUTEX(mdp_clk_lock);
static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock);
static DEFINE_MUTEX(mdp_fs_idle_pc_lock);

/* DT panel-type strings mapped to interface type bits */
static struct mdss_panel_intf pan_types[] = {
	{"dsi", MDSS_PANEL_INTF_DSI},
	{"edp", MDSS_PANEL_INTF_EDP},
	{"hdmi", MDSS_PANEL_INTF_HDMI},
};
/* panel description string (e.g. from boot params) */
static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];

/* MDP core hardware block; serviced by mdss_mdp_isr */
struct mdss_hw mdss_mdp_hw = {
	.hw_ndx = MDSS_HW_MDP,
	.ptr = NULL,
	.irq_handler = mdss_mdp_isr,
};

/* define for h/w block with external driver */
struct mdss_hw mdss_misc_hw = {
	.hw_ndx = MDSS_HW_MISC,
	.ptr = NULL,
	.irq_handler = NULL,
};
116
#ifdef CONFIG_QCOM_BUS_SCALING
#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
119 { \
120 .src = MSM_BUS_MASTER_AMPSS_M0, \
121 .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
122 .ab = (ab_val), \
123 .ib = (ib_val), \
124 }
125
/* register-bus ib votes corresponding to nominal bus clock levels */
#define BUS_VOTE_19_MHZ 153600000
#define BUS_VOTE_40_MHZ 320000000
#define BUS_VOTE_80_MHZ 640000000

/* usecase 0 is the "off" vote; higher indices raise the reg-bus floor */
static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
};
/* one bus path per vector; wired up in mdss_mdp_bus_scale_register() */
static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
		mdp_reg_bus_vectors)];
/* fallback reg-bus table used when the device tree provides none */
static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
	.usecase = mdp_reg_bus_usecases,
	.num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
	.name = "mdss_reg",
	.active_only = true,
};
144#endif
145
/* writeback output formats rejected on mdp107-class hardware
 * (per the table name; verify against its users)
 */
u32 invalid_mdp107_wb_output_fmts[] = {
	MDP_XRGB_8888,
	MDP_RGBX_8888,
	MDP_BGRX_8888,
};
151
/*
 * struct intr_callback - per-interrupt client callback slot
 * @func: handler invoked from the MDP ISR when the irq fires
 * @arg: opaque pointer passed back to @func
 */
struct intr_callback {
	void (*func)(void *);
	void *arg;
};

/*
 * struct mdss_mdp_intr_reg - one MDP interrupt register set
 * @clr_off: offset to the CLEAR register
 * @en_off: offset to the ENABLE register
 * @status_off: offset to the STATUS register
 */
struct mdss_mdp_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * struct mdss_mdp_irq - maps a logical (type, intf) irq onto hardware
 * @intr_type: interrupt type (MDSS_MDP_IRQ_TYPE_*)
 * @intf_num: interface/instance the irq is associated with
 * @irq_mask: corresponding bit in the register set
 * @reg_idx: index into mdp_intr_reg[] selecting which set to program
 */
struct mdss_mdp_irq {
	u32 intr_type;
	u32 intf_num;
	u32 irq_mask;
	u32 reg_idx;
};

/* the two MDP interrupt register sets: INTR (idx 0) and INTR2 (idx 1) */
static struct mdss_mdp_intr_reg mdp_intr_reg[] = {
	{ MDSS_MDP_REG_INTR_CLEAR, MDSS_MDP_REG_INTR_EN,
		MDSS_MDP_REG_INTR_STATUS },
	{ MDSS_MDP_REG_INTR2_CLEAR, MDSS_MDP_REG_INTR2_EN,
		MDSS_MDP_REG_INTR2_STATUS }
};
194
195static struct mdss_mdp_irq mdp_irq_map[] = {
196 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 1,
197 MDSS_MDP_INTR_INTF_0_UNDERRUN, 0},
198 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 2,
199 MDSS_MDP_INTR_INTF_1_UNDERRUN, 0},
200 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 3,
201 MDSS_MDP_INTR_INTF_2_UNDERRUN, 0},
202 { MDSS_MDP_IRQ_TYPE_INTF_UNDER_RUN, 4,
203 MDSS_MDP_INTR_INTF_3_UNDERRUN, 0},
204 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 1,
205 MDSS_MDP_INTR_INTF_0_VSYNC, 0},
206 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 2,
207 MDSS_MDP_INTR_INTF_1_VSYNC, 0},
208 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 3,
209 MDSS_MDP_INTR_INTF_2_VSYNC, 0},
210 { MDSS_MDP_IRQ_TYPE_INTF_VSYNC, 4,
211 MDSS_MDP_INTR_INTF_3_VSYNC, 0},
212 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 0,
213 MDSS_MDP_INTR_PING_PONG_0_DONE, 0},
214 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 1,
215 MDSS_MDP_INTR_PING_PONG_1_DONE, 0},
216 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 2,
217 MDSS_MDP_INTR_PING_PONG_2_DONE, 0},
218 { MDSS_MDP_IRQ_TYPE_PING_PONG_COMP, 3,
219 MDSS_MDP_INTR_PING_PONG_3_DONE, 0},
220 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 0,
221 MDSS_MDP_INTR_PING_PONG_0_RD_PTR, 0},
222 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 1,
223 MDSS_MDP_INTR_PING_PONG_1_RD_PTR, 0},
224 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 2,
225 MDSS_MDP_INTR_PING_PONG_2_RD_PTR, 0},
226 { MDSS_MDP_IRQ_TYPE_PING_PONG_RD_PTR, 3,
227 MDSS_MDP_INTR_PING_PONG_3_RD_PTR, 0},
228 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 0,
229 MDSS_MDP_INTR_PING_PONG_0_WR_PTR, 0},
230 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 1,
231 MDSS_MDP_INTR_PING_PONG_1_WR_PTR, 0},
232 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 2,
233 MDSS_MDP_INTR_PING_PONG_2_WR_PTR, 0},
234 { MDSS_MDP_IRQ_TYPE_PING_PONG_WR_PTR, 3,
235 MDSS_MDP_INTR_PING_PONG_3_WR_PTR, 0},
236 { MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 0,
237 MDSS_MDP_INTR_WB_0_DONE, 0},
238 { MDSS_MDP_IRQ_TYPE_WB_ROT_COMP, 1,
239 MDSS_MDP_INTR_WB_1_DONE, 0},
240 { MDSS_MDP_IRQ_TYPE_WB_WFD_COMP, 0,
241 MDSS_MDP_INTR_WB_2_DONE, 0},
242 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 0,
243 MDSS_MDP_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
244 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 1,
245 MDSS_MDP_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
246 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 2,
247 MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
248 { MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 3,
249 MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
250 { MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 2,
251 MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1},
252 { MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3,
253 MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1}
254};
255
/* callback table indexed like mdp_irq_map[]; guarded by mdss_mdp_intr_lock */
static struct intr_callback *mdp_intr_cb;
257
258static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
259static int mdss_mdp_parse_dt(struct platform_device *pdev);
260static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
261static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
262static int mdss_mdp_parse_dt_wb(struct platform_device *pdev);
263static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev);
264static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev);
265static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
266 char *prop_name, u32 *offsets, int len);
267static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
268 char *prop_name);
269static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
270static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev);
271static int mdss_mdp_parse_dt_misc(struct platform_device *pdev);
272static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev);
273static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev);
274static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev);
275static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev);
276static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev);
277
278static inline u32 is_mdp_irq_enabled(void)
279{
280 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
281 int i;
282
283 for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++)
284 if (mdata->mdp_irq_mask[i] != 0)
285 return 1;
286
287 if (mdata->mdp_hist_irq_mask)
288 return 1;
289
290 if (mdata->mdp_intf_irq_mask)
291 return 1;
292
293 return 0;
294}
295
296u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
297{
298 /* The adreno GPU hardware requires that the pitch be aligned to
299 * 32 pixels for color buffers, so for the cases where the GPU
300 * is writing directly to fb0, the framebuffer pitch
301 * also needs to be 32 pixel aligned
302 */
303
304 if (fb_index == 0)
305 return ALIGN(xres, 32) * bpp;
306 else
307 return xres * bpp;
308}
309
310static void mdss_irq_mask(struct irq_data *data)
311{
312 struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
313 unsigned long irq_flags;
314
315 if (!mdata)
316 return;
317
318 pr_debug("irq_domain_mask %lu\n", data->hwirq);
319
320 if (data->hwirq < 32) {
321 spin_lock_irqsave(&mdp_lock, irq_flags);
322 mdata->mdss_util->disable_irq(&mdss_misc_hw);
323 spin_unlock_irqrestore(&mdp_lock, irq_flags);
324 }
325}
326
327static void mdss_irq_unmask(struct irq_data *data)
328{
329 struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
330 unsigned long irq_flags;
331
332 if (!mdata)
333 return;
334
335 pr_debug("irq_domain_unmask %lu\n", data->hwirq);
336
337 if (data->hwirq < 32) {
338 spin_lock_irqsave(&mdp_lock, irq_flags);
339 mdata->mdss_util->enable_irq(&mdss_misc_hw);
340 spin_unlock_irqrestore(&mdp_lock, irq_flags);
341 }
342}
343
/* irq_chip backing the MDSS misc irq domain for external drivers */
static struct irq_chip mdss_irq_chip = {
	.name = "mdss",
	.irq_mask = mdss_irq_mask,
	.irq_unmask = mdss_irq_unmask,
};
349
350static int mdss_irq_domain_map(struct irq_domain *d,
351 unsigned int virq, irq_hw_number_t hw)
352{
353 struct mdss_data_type *mdata = d->host_data;
354 /* check here if virq is a valid interrupt line */
355 irq_set_chip_and_handler(virq, &mdss_irq_chip, handle_level_irq);
356 irq_set_chip_data(virq, mdata);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530357 return 0;
358}
359
/* irq domain operations used to expose MDSS misc irqs to other drivers */
const struct irq_domain_ops mdss_irq_domain_ops = {
	.map = mdss_irq_domain_map,
	.xlate = irq_domain_xlate_onecell,
};
364
365static irqreturn_t mdss_irq_handler(int irq, void *ptr)
366{
367 struct mdss_data_type *mdata = ptr;
368 u32 intr;
369
370 if (!mdata)
371 return IRQ_NONE;
372 else if (!mdss_get_irq_enable_state(&mdss_mdp_hw))
373 return IRQ_HANDLED;
374
375 intr = MDSS_REG_READ(mdata, MDSS_REG_HW_INTR_STATUS);
376
377 mdss_mdp_hw.irq_info->irq_buzy = true;
378
379 if (intr & MDSS_INTR_MDP) {
380 spin_lock(&mdp_lock);
381 mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr);
382 spin_unlock(&mdp_lock);
383 intr &= ~MDSS_INTR_MDP;
384 }
385
386 if (intr & MDSS_INTR_DSI0) {
387 mdata->mdss_util->irq_dispatch(MDSS_HW_DSI0, irq, ptr);
388 intr &= ~MDSS_INTR_DSI0;
389 }
390
391 if (intr & MDSS_INTR_DSI1) {
392 mdata->mdss_util->irq_dispatch(MDSS_HW_DSI1, irq, ptr);
393 intr &= ~MDSS_INTR_DSI1;
394 }
395
396 if (intr & MDSS_INTR_EDP) {
397 mdata->mdss_util->irq_dispatch(MDSS_HW_EDP, irq, ptr);
398 intr &= ~MDSS_INTR_EDP;
399 }
400
401 if (intr & MDSS_INTR_HDMI) {
402 mdata->mdss_util->irq_dispatch(MDSS_HW_HDMI, irq, ptr);
403 intr &= ~MDSS_INTR_HDMI;
404 }
405
406 /* route misc. interrupts to external drivers */
407 while (intr) {
408 irq_hw_number_t hwirq = fls(intr) - 1;
409
410 generic_handle_irq(irq_find_mapping(
411 mdata->irq_domain, hwirq));
412 intr &= ~(1 << hwirq);
413 }
414
415 mdss_mdp_hw.irq_info->irq_buzy = false;
416
417 return IRQ_HANDLED;
418}
419
#ifdef CONFIG_QCOM_BUS_SCALING
/*
 * Register the msm bus-scale clients used by MDSS: the data (RT) bus
 * client, the register bus client, and an optional hw-rt client.
 * Each registration is skipped if the handle already exists, so the
 * function is safe to call more than once.
 */
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
	struct msm_bus_scale_pdata *reg_bus_pdata;
	int i, rc;

	/* data (RT) bus client: vote table comes from the device tree */
	if (!mdata->bus_hdl) {
		rc = mdss_mdp_parse_dt_bus_scale(mdata->pdev);
		if (rc) {
			pr_err("Error in device tree : bus scale\n");
			return rc;
		}

		mdata->bus_hdl =
			msm_bus_scale_register_client(mdata->bus_scale_table);
		if (!mdata->bus_hdl) {
			pr_err("bus_client register failed\n");
			return -EINVAL;
		}

		pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
	}

	/* reg bus: fall back to the static local table if DT gave none */
	if (!mdata->reg_bus_scale_table) {
		reg_bus_pdata = &mdp_reg_bus_scale_table;
		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
			mdp_reg_bus_usecases[i].num_paths = 1;
			mdp_reg_bus_usecases[i].vectors =
				&mdp_reg_bus_vectors[i];
		}
		mdata->reg_bus_scale_table = reg_bus_pdata;
	}

	/* reg bus voting is best-effort; failure is not fatal */
	if (!mdata->reg_bus_hdl) {
		mdata->reg_bus_hdl =
			msm_bus_scale_register_client(
					mdata->reg_bus_scale_table);
		if (!mdata->reg_bus_hdl)
			/* Continue without reg_bus scaling */
			pr_warn("reg_bus_client register failed\n");
		else
			pr_debug("register reg_bus_hdl=%x\n",
					mdata->reg_bus_hdl);
	}

	/* optional hw-rt client, only when a DT table exists */
	if (mdata->hw_rt_bus_scale_table && !mdata->hw_rt_bus_hdl) {
		mdata->hw_rt_bus_hdl =
			msm_bus_scale_register_client(
					mdata->hw_rt_bus_scale_table);
		if (!mdata->hw_rt_bus_hdl)
			/* Continue without hw_rt_bus scaling */
			pr_warn("hw_rt_bus client register failed\n");
		else
			pr_debug("register hw_rt_bus=%x\n",
					mdata->hw_rt_bus_hdl);
	}

	/*
	 * Following call will not result in actual vote rather update the
	 * current index and ab/ib value. When continuous splash is enabled,
	 * actual vote will happen when splash handoff is done.
	 */
	return mdss_bus_scale_set_quota(MDSS_MDP_RT, AB_QUOTA, IB_QUOTA);
}
484
485static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
486{
487 pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl);
488
489 if (mdata->bus_hdl)
490 msm_bus_scale_unregister_client(mdata->bus_hdl);
491
492 pr_debug("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
493
494 if (mdata->reg_bus_hdl) {
495 msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
496 mdata->reg_bus_hdl = 0;
497 }
498
499 if (mdata->hw_rt_bus_hdl) {
500 msm_bus_scale_unregister_client(mdata->hw_rt_bus_hdl);
501 mdata->hw_rt_bus_hdl = 0;
502 }
503}
504
/*
 * Caller needs to hold mdata->bus_lock lock before calling this function.
 *
 * Distribute the aggregated RT and NRT bandwidth votes across the AXI
 * ports and issue the vote through the msm bus-scale client. Votes
 * rotate through usecase slots 1..N-1 so a new vote never rewrites the
 * currently-active usecase entry; usecase 0 is the all-off vote.
 */
static int mdss_mdp_bus_scale_set_quota(u64 ab_quota_rt, u64 ab_quota_nrt,
		u64 ib_quota_rt, u64 ib_quota_nrt)
{
	int new_uc_idx;
	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	int rc;

	if (mdss_res->bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
		return -EINVAL;
	}

	/* an all-zero request maps to the dedicated "off" usecase 0 */
	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt) {
		new_uc_idx = 0;
	} else {
		int i;
		struct msm_bus_vectors *vect = NULL;
		struct msm_bus_scale_pdata *bw_table =
			mdss_res->bus_scale_table;
		u32 nrt_axi_port_cnt = mdss_res->nrt_axi_port_cnt;
		u32 total_axi_port_cnt = mdss_res->axi_port_cnt;
		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
		int match_cnt = 0;

		if (!bw_table || !total_axi_port_cnt ||
		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
			pr_err("invalid input\n");
			return -EINVAL;
		}

		/* ib requirement is shared across interleaved bus channels */
		if (mdss_res->bus_channels) {
			ib_quota_rt = div_u64(ib_quota_rt,
						mdss_res->bus_channels);
			ib_quota_nrt = div_u64(ib_quota_nrt,
						mdss_res->bus_channels);
		}

		if (mdss_res->has_fixed_qos_arbiter_enabled ||
			nrt_axi_port_cnt) {
			/* split votes between dedicated RT and NRT ports */
			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);

			for (i = 0; i < total_axi_port_cnt; i++) {
				if (i < rt_axi_port_cnt) {
					ab_quota[i] = ab_quota_rt;
					ib_quota[i] = ib_quota_rt;
				} else {
					ab_quota[i] = ab_quota_nrt;
					ib_quota[i] = ib_quota_nrt;
				}
			}
		} else {
			/* no NRT ports: spread combined vote over all ports */
			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
					total_axi_port_cnt);
			ib_quota[0] = ib_quota_rt + ib_quota_nrt;

			for (i = 1; i < total_axi_port_cnt; i++) {
				ab_quota[i] = ab_quota[0];
				ib_quota[i] = ib_quota[0];
			}
		}

		/* avoid performing updates for small changes */
		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase
				[mdss_res->curr_bw_uc_idx].vectors[i];
			if ((ab_quota[i] == vect->ab) &&
				(ib_quota[i] == vect->ib))
				match_cnt++;
		}

		if (match_cnt == total_axi_port_cnt) {
			pr_debug("skip BW vote\n");
			return 0;
		}

		/* rotate to the next usecase slot in 1..num_usecases-1 */
		new_uc_idx = (mdss_res->curr_bw_uc_idx %
			(bw_table->num_usecases - 1)) + 1;

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase[new_uc_idx].vectors[i];
			vect->ab = ab_quota[i];
			vect->ib = ib_quota[i];

			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
				, i, vect->ab, vect->ib);
		}
	}
	mdss_res->curr_bw_uc_idx = new_uc_idx;
	mdss_res->ao_bw_uc_idx = new_uc_idx;

	/* defer the actual vote while the bus is unreferenced (powered off) */
	if ((mdss_res->bus_ref_cnt == 0) && mdss_res->curr_bw_uc_idx) {
		rc = 0;
	} else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
		ATRACE_BEGIN("msm_bus_scale_req");
		rc = msm_bus_scale_client_update_request(mdss_res->bus_hdl,
			new_uc_idx);
		ATRACE_END("msm_bus_scale_req");
	}
	return rc;
}
612
613struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
614{
615 struct reg_bus_client *client;
616 static u32 id;
617
618 if (client_name == NULL) {
619 pr_err("client name is null\n");
620 return ERR_PTR(-EINVAL);
621 }
622
623 client = kcalloc(1, sizeof(struct reg_bus_client), GFP_KERNEL);
624 if (!client)
625 return ERR_PTR(-ENOMEM);
626
627 mutex_lock(&mdss_res->reg_bus_lock);
628 strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
629 client->usecase_ndx = VOTE_INDEX_DISABLE;
630 client->id = id;
631 pr_debug("bus vote client %s created:%pK id :%d\n", client_name,
632 client, id);
633 id++;
634 list_add(&client->list, &mdss_res->reg_bus_clist);
635 mutex_unlock(&mdss_res->reg_bus_lock);
636
637 return client;
638}
639
640void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
641{
642 if (!client) {
643 pr_err("reg bus vote: invalid client handle\n");
644 } else {
645 pr_debug("bus vote client %s destroyed:%pK id:%u\n",
646 client->name, client, client->id);
647 mutex_lock(&mdss_res->reg_bus_lock);
648 list_del_init(&client->list);
649 mutex_unlock(&mdss_res->reg_bus_lock);
650 kfree(client);
651 }
652}
653
654int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
655{
656 int ret = 0;
657 bool changed = false;
658 u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
659 struct reg_bus_client *client, *temp_client;
660
661 if (!mdss_res || !mdss_res->reg_bus_hdl || !bus_client)
662 return 0;
663
664 mutex_lock(&mdss_res->reg_bus_lock);
665 bus_client->usecase_ndx = usecase_ndx;
666 list_for_each_entry_safe(client, temp_client, &mdss_res->reg_bus_clist,
667 list) {
668
669 if (client->usecase_ndx < VOTE_INDEX_MAX &&
670 client->usecase_ndx > max_usecase_ndx)
671 max_usecase_ndx = client->usecase_ndx;
672 }
673
674 if (mdss_res->reg_bus_usecase_ndx != max_usecase_ndx) {
675 changed = true;
676 mdss_res->reg_bus_usecase_ndx = max_usecase_ndx;
677 }
678
679 pr_debug("%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
680 __builtin_return_address(0), changed, max_usecase_ndx,
681 bus_client->name, bus_client->id, usecase_ndx);
682 MDSS_XLOG(changed, max_usecase_ndx, bus_client->id, usecase_ndx);
683 if (changed)
684 ret = msm_bus_scale_client_update_request(mdss_res->reg_bus_hdl,
685 max_usecase_ndx);
686
687 mutex_unlock(&mdss_res->reg_bus_lock);
688 return ret;
689}
690
691int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
692{
693 int rc = 0;
694 int i;
695 u64 total_ab_rt = 0, total_ib_rt = 0;
696 u64 total_ab_nrt = 0, total_ib_nrt = 0;
697
698 mutex_lock(&mdss_res->bus_lock);
699
700 mdss_res->ab[client] = ab_quota;
701 mdss_res->ib[client] = ib_quota;
702 trace_mdp_perf_update_bus(client, ab_quota, ib_quota);
703
704 for (i = 0; i < MDSS_MAX_BUS_CLIENTS; i++) {
705 if (i == MDSS_MDP_NRT) {
706 total_ab_nrt = mdss_res->ab[i];
707 total_ib_nrt = mdss_res->ib[i];
708 } else {
709 total_ab_rt += mdss_res->ab[i];
710 total_ib_rt = max(total_ib_rt, mdss_res->ib[i]);
711 }
712 }
713
714 rc = mdss_mdp_bus_scale_set_quota(total_ab_rt, total_ab_nrt,
715 total_ib_rt, total_ib_nrt);
716
717 mutex_unlock(&mdss_res->bus_lock);
718
719 return rc;
720}
721#else
/* Stubbed bus-scaling API for builds without CONFIG_QCOM_BUS_SCALING. */
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
	return 0;
}

static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
{
}

/* No-op: log the ignored vote at debug level and report success. */
int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
{
	pr_debug("No bus scaling! client=%d ab=%llu ib=%llu\n",
			client, ab_quota, ib_quota);

	return 0;
}

/* Returns NULL (not an ERR_PTR) when bus scaling is compiled out. */
struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
{
	return NULL;
}

void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
{
}

int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
{
	pr_debug("%pS: No reg scaling! usecase=%u\n",
			__builtin_return_address(0), usecase_ndx);

	return 0;
}
755#endif
756
757
758static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num)
759{
760 int i;
761
762 for (i = 0; i < ARRAY_SIZE(mdp_irq_map); i++) {
763 if (intr_type == mdp_irq_map[i].intr_type &&
764 intf_num == mdp_irq_map[i].intf_num)
765 return i;
766 }
767 return -EINVAL;
768}
769
770u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num)
771{
772 int idx = mdss_mdp_intr2index(intr_type, intf_num);
773
774 return (idx < 0) ? 0 : mdp_irq_map[idx].irq_mask;
775}
776
/* Enable the MDSS-level MDP hardware interrupt line. */
void mdss_mdp_enable_hw_irq(struct mdss_data_type *mdata)
{
	mdata->mdss_util->enable_irq(&mdss_mdp_hw);
}

/* Disable the MDP line only when no MDP/hist/intf irq remains unmasked. */
void mdss_mdp_disable_hw_irq(struct mdss_data_type *mdata)
{
	if (!is_mdp_irq_enabled())
		mdata->mdss_util->disable_irq(&mdss_mdp_hw);
}
787
788/* function assumes that mdp is clocked to access hw registers */
789void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
790 u32 intr_type, u32 intf_num)
791{
792 unsigned long irq_flags;
793 int irq_idx;
794 struct mdss_mdp_intr_reg reg;
795 struct mdss_mdp_irq irq;
796
797 irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
798 if (irq_idx < 0) {
799 pr_err("invalid irq request\n");
800 return;
801 }
802
803 irq = mdp_irq_map[irq_idx];
804 reg = mdp_intr_reg[irq.reg_idx];
805
806 pr_debug("clearing mdp irq mask=%x\n", irq.irq_mask);
807 spin_lock_irqsave(&mdp_lock, irq_flags);
808 writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
809 spin_unlock_irqrestore(&mdp_lock, irq_flags);
810}
811
812int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
813{
Sachin Bhayare3d3767e2018-01-02 21:10:57 +0530814 int irq_idx;
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530815 unsigned long irq_flags;
816 int ret = 0;
817 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
818 struct mdss_mdp_intr_reg reg;
819 struct mdss_mdp_irq irq;
820
821 irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
822 if (irq_idx < 0) {
823 pr_err("invalid irq request\n");
824 return -EINVAL;
825 }
826
827 irq = mdp_irq_map[irq_idx];
828 reg = mdp_intr_reg[irq.reg_idx];
829
830 spin_lock_irqsave(&mdp_lock, irq_flags);
831 if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) {
832 pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
Sachin Bhayare3d3767e2018-01-02 21:10:57 +0530833 irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
Sachin Bhayareeeb88892018-01-02 16:36:01 +0530834 ret = -EBUSY;
835 } else {
836 pr_debug("MDP IRQ mask old=%x new=%x\n",
837 mdata->mdp_irq_mask[irq.reg_idx], irq.irq_mask);
838 mdata->mdp_irq_mask[irq.reg_idx] |= irq.irq_mask;
839 writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
840 writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
841 mdata->mdp_base + reg.en_off);
842 mdata->mdss_util->enable_irq(&mdss_mdp_hw);
843 }
844 spin_unlock_irqrestore(&mdp_lock, irq_flags);
845
846 return ret;
847}
/*
 * Unmask a histogram interrupt bit: clear any stale status, program the
 * HIST enable register and turn on the MDSS-level irq line.
 * Returns -EBUSY if the bit is already enabled.
 *
 * NOTE(review): unlike mdss_mdp_irq_enable(), no spinlock is taken while
 * mdp_hist_irq_mask is read-modify-written — presumably callers serialize
 * histogram irq updates; confirm against the histogram code.
 */
int mdss_mdp_hist_irq_enable(u32 irq)
{
	int ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (mdata->mdp_hist_irq_mask & irq) {
		pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n",
				irq, mdata->mdp_hist_irq_mask);
		ret = -EBUSY;
	} else {
		pr_debug("mask old=%x new=%x\n",
				mdata->mdp_hist_irq_mask, irq);
		mdata->mdp_hist_irq_mask |= irq;
		/* ack stale status before unmasking */
		writel_relaxed(irq, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_CLEAR);
		writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_EN);
		mdata->mdss_util->enable_irq(&mdss_mdp_hw);
	}

	return ret;
}
870
871void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num)
872{
873 int irq_idx;
874 unsigned long irq_flags;
875 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
876 struct mdss_mdp_intr_reg reg;
877 struct mdss_mdp_irq irq;
878
879 irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
880 if (irq_idx < 0) {
881 pr_err("invalid irq request\n");
882 return;
883 }
884
885 irq = mdp_irq_map[irq_idx];
886 reg = mdp_intr_reg[irq.reg_idx];
887
888 spin_lock_irqsave(&mdp_lock, irq_flags);
889 if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
890 pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
891 irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
892 } else {
893 mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
894 writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
895 mdata->mdp_base + reg.en_off);
896 if (!is_mdp_irq_enabled())
897 mdata->mdss_util->disable_irq(&mdss_mdp_hw);
898 }
899 spin_unlock_irqrestore(&mdp_lock, irq_flags);
900}
901
/*
 * Check whether the interrupt mapped to (intr_type, intf_num) is pending
 * in its STATUS register and, if so, acknowledge it via the CLEAR
 * register. Registers are accessed, so MDP clocks must be on.
 */
void mdss_mdp_intr_check_and_clear(u32 intr_type, u32 intf_num)
{
	u32 status;
	int irq_idx;
	unsigned long irq_flags;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	spin_lock_irqsave(&mdp_lock, irq_flags);
	status = irq.irq_mask & readl_relaxed(mdata->mdp_base +
			reg.status_off);
	if (status) {
		pr_debug("clearing irq: intr_type:%d, intf_num:%d\n",
				intr_type, intf_num);
		writel_relaxed(irq.irq_mask, mdata->mdp_base + reg.clr_off);
	}
	spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
931
932void mdss_mdp_hist_irq_disable(u32 irq)
933{
934 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
935
936 if (!(mdata->mdp_hist_irq_mask & irq)) {
937 pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
938 irq, mdata->mdp_hist_irq_mask);
939 } else {
940 mdata->mdp_hist_irq_mask &= ~irq;
941 writel_relaxed(mdata->mdp_hist_irq_mask, mdata->mdp_base +
942 MDSS_MDP_REG_HIST_INTR_EN);
943 if (!is_mdp_irq_enabled())
944 mdata->mdss_util->disable_irq(&mdss_mdp_hw);
945 }
946}
947
/**
 * mdss_mdp_irq_disable_nosync() - disable mdp irq from interrupt context
 * @intr_type: mdp interface type
 * @intf_num: mdp interface num
 *
 * Lockless variant of mdss_mdp_irq_disable(): mdp_lock is already held
 * upstream (mdss_irq_handler), therefore spin_lock(&mdp_lock) is not
 * allowed here and the irq line is dropped with the _nosync helper.
 */
void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
{
	int irq_idx;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_intr_reg reg;
	struct mdss_mdp_irq irq;

	irq_idx = mdss_mdp_intr2index(intr_type, intf_num);
	if (irq_idx < 0) {
		pr_err("invalid irq request\n");
		return;
	}

	irq = mdp_irq_map[irq_idx];
	reg = mdp_intr_reg[irq.reg_idx];

	if (!(mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask)) {
		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
				irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]);
	} else {
		mdata->mdp_irq_mask[irq.reg_idx] &= ~irq.irq_mask;
		writel_relaxed(mdata->mdp_irq_mask[irq.reg_idx],
				mdata->mdp_base + reg.en_off);
		/* drop the MDSS-level line once nothing remains enabled */
		if (!is_mdp_irq_enabled())
			mdata->mdss_util->disable_irq_nosync(&mdss_mdp_hw);
	}
}
985
986int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
987 void (*fnc_ptr)(void *), void *arg)
988{
989 unsigned long flags;
990 int index;
991
992 index = mdss_mdp_intr2index(intr_type, intf_num);
993 if (index < 0) {
994 pr_warn("invalid intr type=%u intf_numf_num=%u\n",
995 intr_type, intf_num);
996 return -EINVAL;
997 }
998
999 spin_lock_irqsave(&mdss_mdp_intr_lock, flags);
1000 WARN(mdp_intr_cb[index].func && fnc_ptr,
1001 "replacing current intr callback for ndx=%d\n", index);
1002 mdp_intr_cb[index].func = fnc_ptr;
1003 mdp_intr_cb[index].arg = arg;
1004 spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags);
1005
1006 return 0;
1007}
1008
1009int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
1010 void (*fnc_ptr)(void *), void *arg)
1011{
1012 int index;
1013
1014 index = mdss_mdp_intr2index(intr_type, intf_num);
1015 if (index < 0) {
1016 pr_warn("invalid intr Typee=%u intf_num=%u\n",
1017 intr_type, intf_num);
1018 return -EINVAL;
1019 }
1020
1021 WARN(mdp_intr_cb[index].func && fnc_ptr,
1022 "replacing current intr callbackack for ndx=%d\n",
1023 index);
1024 mdp_intr_cb[index].func = fnc_ptr;
1025 mdp_intr_cb[index].arg = arg;
1026
1027 return 0;
1028}
1029
1030static inline void mdss_mdp_intr_done(int index)
1031{
1032 void (*fnc)(void *);
1033 void *arg;
1034
1035 spin_lock(&mdss_mdp_intr_lock);
1036 fnc = mdp_intr_cb[index].func;
1037 arg = mdp_intr_cb[index].arg;
1038 spin_unlock(&mdss_mdp_intr_lock);
1039 if (fnc)
1040 fnc(arg);
1041}
1042
/*
 * mdss_mdp_isr() - MDP core interrupt service routine
 *
 * Walks both interrupt register sets (INTR and INTR2), acknowledges all
 * raised bits, dispatches registered callbacks for the enabled ones,
 * collects MISR CRCs for set-0 sources, services histogram interrupts
 * and finally hands the video interfaces their turn.
 */
irqreturn_t mdss_mdp_isr(int irq, void *ptr)
{
	struct mdss_data_type *mdata = ptr;
	u32 isr, mask, hist_isr, hist_mask;
	int i, j;

	/* registers are inaccessible with clocks gated; nothing to do */
	if (!mdata->clk_ena)
		return IRQ_HANDLED;

	for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) {
		struct mdss_mdp_intr_reg reg = mdp_intr_reg[i];

		isr = readl_relaxed(mdata->mdp_base + reg.status_off);
		if (isr == 0)
			continue;

		/* ack everything that fired; handle only enabled bits */
		mask = readl_relaxed(mdata->mdp_base + reg.en_off);
		writel_relaxed(isr, mdata->mdp_base + reg.clr_off);

		pr_debug("%s: reg:%d isr=%x mask=%x\n",
				__func__, i+1, isr, mask);

		isr &= mask;
		if (isr == 0)
			continue;

		/* dispatch registered client callbacks for this reg set */
		for (j = 0; j < ARRAY_SIZE(mdp_irq_map); j++)
			if (mdp_irq_map[j].reg_idx == i &&
					(isr & mdp_irq_map[j].irq_mask))
				mdss_mdp_intr_done(j);

		/* MISR CRC collection is driven off set-0 (INTR) bits only */
		if (!i) {
			if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
					false);

			if (isr & MDSS_MDP_INTR_PING_PONG_1_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
					false);

			if (isr & MDSS_MDP_INTR_INTF_0_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_EDP,
					true);

			if (isr & MDSS_MDP_INTR_INTF_1_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI0,
					true);

			if (isr & MDSS_MDP_INTR_INTF_2_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_DSI1,
					true);

			if (isr & MDSS_MDP_INTR_INTF_3_VSYNC)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_HDMI,
					true);

			if (isr & MDSS_MDP_INTR_WB_0_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);

			if (isr & MDSS_MDP_INTR_WB_1_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);

			if (isr & MDSS_MDP_INTR_WB_2_DONE)
				mdss_misr_crc_collect(mdata, DISPLAY_MISR_MDP,
					true);
		}
	}

	/* the histogram block has its own status/clear/enable registers */
	hist_isr = readl_relaxed(mdata->mdp_base +
			MDSS_MDP_REG_HIST_INTR_STATUS);
	if (hist_isr != 0) {
		hist_mask = readl_relaxed(mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_EN);
		writel_relaxed(hist_isr, mdata->mdp_base +
				MDSS_MDP_REG_HIST_INTR_CLEAR);
		hist_isr &= hist_mask;
		if (hist_isr != 0)
			mdss_mdp_hist_intr_done(hist_isr);
	}

	mdss_mdp_video_isr(mdata->video_intf, mdata->nintf);
	return IRQ_HANDLED;
}
1127
1128static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
1129{
1130 int ret = -ENODEV;
1131 struct clk *clk = mdss_mdp_get_clk(clk_idx);
1132
1133 if (clk) {
1134 pr_debug("clk=%d en=%d\n", clk_idx, enable);
1135 if (enable) {
1136 if (clk_idx == MDSS_CLK_MDP_VSYNC)
1137 clk_set_rate(clk, 19200000);
1138 ret = clk_prepare_enable(clk);
1139 } else {
1140 clk_disable_unprepare(clk);
1141 ret = 0;
1142 }
1143 }
1144 return ret;
1145}
1146
1147int mdss_mdp_vsync_clk_enable(int enable, bool locked)
1148{
1149 int ret = 0;
1150
1151 pr_debug("clk enable=%d\n", enable);
1152
1153 if (!locked)
1154 mutex_lock(&mdp_clk_lock);
1155
1156 if (mdss_res->vsync_ena != enable) {
1157 mdss_res->vsync_ena = enable;
1158 ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
1159 }
1160
1161 if (!locked)
1162 mutex_unlock(&mdp_clk_lock);
1163 return ret;
1164}
1165
1166void mdss_mdp_set_clk_rate(unsigned long rate)
1167{
1168 struct mdss_data_type *mdata = mdss_res;
1169 unsigned long clk_rate;
1170 struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
1171 unsigned long min_clk_rate;
1172
1173 min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
1174
1175 if (clk) {
1176 mutex_lock(&mdp_clk_lock);
1177 if (min_clk_rate < mdata->max_mdp_clk_rate)
1178 clk_rate = clk_round_rate(clk, min_clk_rate);
1179 else
1180 clk_rate = mdata->max_mdp_clk_rate;
1181 if (IS_ERR_VALUE(clk_rate)) {
1182 pr_err("unable to round rate err=%ld\n", clk_rate);
1183 } else if (clk_rate != clk_get_rate(clk)) {
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301184 if (IS_ERR_VALUE((unsigned long)
1185 clk_set_rate(clk, clk_rate)))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301186 pr_err("clk_set_rate failed\n");
1187 else
1188 pr_debug("mdp clk rate=%lu\n", clk_rate);
1189 }
1190 mutex_unlock(&mdp_clk_lock);
1191 } else {
1192 pr_err("mdp src clk not setup properly\n");
1193 }
1194}
1195
1196unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked)
1197{
1198 unsigned long clk_rate = 0;
1199 struct clk *clk = mdss_mdp_get_clk(clk_idx);
1200
1201 if (clk) {
1202 if (!locked)
1203 mutex_lock(&mdp_clk_lock);
1204
1205 clk_rate = clk_get_rate(clk);
1206
1207 if (!locked)
1208 mutex_unlock(&mdp_clk_lock);
1209 }
1210
1211 return clk_rate;
1212}
1213
1214/**
1215 * mdss_bus_rt_bw_vote() -- place bus bandwidth request
1216 * @enable: value of enable or disable
1217 *
1218 * hw_rt table has two entries, 0 and Min Vote (1Mhz)
1219 * while attaching SMMU and for few TZ operations which
1220 * happen at very early stage, we will request Min Vote
1221 * thru this handle.
1222 *
1223 */
1224static int mdss_bus_rt_bw_vote(bool enable)
1225{
1226 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1227 int rc = 0;
1228 bool changed = false;
1229
1230 if (!mdata->hw_rt_bus_hdl || mdata->handoff_pending)
1231 return 0;
1232
1233 if (enable) {
1234 if (mdata->hw_rt_bus_ref_cnt == 0)
1235 changed = true;
1236 mdata->hw_rt_bus_ref_cnt++;
1237 } else {
1238 if (mdata->hw_rt_bus_ref_cnt != 0) {
1239 mdata->hw_rt_bus_ref_cnt--;
1240 if (mdata->hw_rt_bus_ref_cnt == 0)
1241 changed = true;
1242 } else {
1243 pr_warn("%s: bus bw votes are not balanced\n",
1244 __func__);
1245 }
1246 }
1247
1248 pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
1249 __builtin_return_address(0), current->group_leader->comm,
1250 mdata->hw_rt_bus_ref_cnt, changed, enable);
1251
1252 if (changed) {
1253 rc = msm_bus_scale_client_update_request(mdata->hw_rt_bus_hdl,
1254 enable ? 1 : 0);
1255 if (rc)
1256 pr_err("%s: Bus bandwidth vote failed\n", __func__);
1257 }
1258
1259 return rc;
1260}
1261
1262/**
1263 * __mdss_mdp_reg_access_clk_enable - Enable minimum MDSS clocks required
1264 * for register access
1265 */
1266static inline void __mdss_mdp_reg_access_clk_enable(
1267 struct mdss_data_type *mdata, bool enable)
1268{
1269 if (enable) {
1270 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1271 VOTE_INDEX_LOW);
1272 mdss_bus_rt_bw_vote(true);
1273 mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
1274 mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
1275 mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
1276 } else {
1277 mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
1278 mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
1279 mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
1280 mdss_bus_rt_bw_vote(false);
1281 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1282 VOTE_INDEX_DISABLE);
1283 }
1284}
1285
1286int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
1287{
1288 int rc = 0;
1289 void __iomem *base;
1290 u32 halt_ack_mask = BIT(0), status;
1291
1292 /* if not real time vbif */
1293 if (is_nrt)
1294 base = mdata->vbif_nrt_io.base;
1295 else
1296 base = mdata->vbif_io.base;
1297
1298 if (!base) {
1299 /* some targets might not have a nrt port */
1300 goto vbif_done;
1301 }
1302
1303 /* force vbif clock on */
1304 MDSS_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 1, is_nrt);
1305
1306 /* request halt */
1307 MDSS_VBIF_WRITE(mdata, MMSS_VBIF_AXI_HALT_CTRL0, 1, is_nrt);
1308
1309 rc = readl_poll_timeout(base +
1310 MMSS_VBIF_AXI_HALT_CTRL1, status, (status &
1311 halt_ack_mask),
1312 1000, AXI_HALT_TIMEOUT_US);
1313 if (rc == -ETIMEDOUT) {
1314 pr_err("VBIF axi is not halting. TIMEDOUT.\n");
1315 goto vbif_done;
1316 }
1317
1318 pr_debug("VBIF axi is halted\n");
1319
1320vbif_done:
1321 return rc;
1322}
1323
1324/**
1325 * mdss_mdp_vbif_axi_halt() - Halt MDSS AXI ports
1326 * @mdata: pointer to the global mdss data structure.
1327 *
1328 * This function can be called during deep suspend, display off or for
1329 * debugging purposes. On success it should be assumed that AXI ports connected
1330 * to RT VBIF are in idle state and would not fetch any more data.
1331 */
1332static void mdss_mdp_vbif_axi_halt(struct mdss_data_type *mdata)
1333{
1334 __mdss_mdp_reg_access_clk_enable(mdata, true);
1335
1336 /* real time ports */
1337 __mdss_mdp_vbif_halt(mdata, false);
1338 /* non-real time ports */
1339 __mdss_mdp_vbif_halt(mdata, true);
1340
1341 __mdss_mdp_reg_access_clk_enable(mdata, false);
1342}
1343
1344int mdss_iommu_ctrl(int enable)
1345{
1346 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1347 int rc = 0;
1348
1349 mutex_lock(&mdp_iommu_ref_cnt_lock);
1350 pr_debug("%pS: enable:%d ref_cnt:%d attach:%d hoff:%d\n",
1351 __builtin_return_address(0), enable, mdata->iommu_ref_cnt,
1352 mdata->iommu_attached, mdata->handoff_pending);
1353
1354 if (enable) {
1355 /*
1356 * delay iommu attach until continuous splash screen has
1357 * finished handoff, as it may still be working with phys addr
1358 */
1359 if (!mdata->iommu_attached && !mdata->handoff_pending) {
1360 mdss_bus_rt_bw_vote(true);
1361 rc = mdss_smmu_attach(mdata);
1362 }
1363 mdata->iommu_ref_cnt++;
1364 } else {
1365 if (mdata->iommu_ref_cnt) {
1366 mdata->iommu_ref_cnt--;
1367 if (mdata->iommu_ref_cnt == 0) {
1368 rc = mdss_smmu_detach(mdata);
1369 mdss_bus_rt_bw_vote(false);
1370 }
1371 } else {
1372 pr_err("unbalanced iommu ref\n");
1373 }
1374 }
1375 mutex_unlock(&mdp_iommu_ref_cnt_lock);
1376
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301377 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301378 return rc;
1379 else
1380 return mdata->iommu_ref_cnt;
1381}
1382
1383static void mdss_mdp_memory_retention_enter(void)
1384{
1385 struct clk *mdss_mdp_clk = NULL;
1386 struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
1387
1388 if (mdp_vote_clk) {
1389 mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
1390 if (mdss_mdp_clk) {
1391 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
1392 clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
1393 clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
1394 }
1395 }
1396}
1397
1398static void mdss_mdp_memory_retention_exit(void)
1399{
1400 struct clk *mdss_mdp_clk = NULL;
1401 struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
1402
1403 if (mdp_vote_clk) {
1404 mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
1405 if (mdss_mdp_clk) {
1406 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
1407 clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
1408 clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
1409 }
1410 }
1411}
1412
1413/**
1414 * mdss_mdp_idle_pc_restore() - Restore MDSS settings when exiting idle pc
1415 *
1416 * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
1417 * mode displays, referred to as MDSS idle power collapse. Upon subsequent
1418 * frame update, MDSS GDSC needs to turned back on and hw state needs to be
1419 * restored.
1420 */
1421static int mdss_mdp_idle_pc_restore(void)
1422{
1423 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1424 int rc = 0;
1425
1426 mutex_lock(&mdp_fs_idle_pc_lock);
1427 if (!mdata->idle_pc) {
1428 pr_debug("no idle pc, no need to restore\n");
1429 goto end;
1430 }
1431
1432 pr_debug("called from %pS\n", __builtin_return_address(0));
1433 rc = mdss_iommu_ctrl(1);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301434 if (IS_ERR_VALUE((unsigned long)rc)) {
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301435 pr_err("mdss iommu attach failed rc=%d\n", rc);
1436 goto end;
1437 }
1438 mdss_hw_init(mdata);
1439 mdss_iommu_ctrl(0);
1440
1441 /**
1442 * sleep 10 microseconds to make sure AD auto-reinitialization
1443 * is done
1444 */
1445 udelay(10);
1446 mdss_mdp_memory_retention_exit();
1447
1448 mdss_mdp_ctl_restore(true);
1449 mdata->idle_pc = false;
1450
1451end:
1452 mutex_unlock(&mdp_fs_idle_pc_lock);
1453 return rc;
1454}
1455
1456/**
1457 * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
1458 * @enable: value of enable or disable
1459 *
1460 * Function place bus bandwidth request to allocate saved bandwidth
1461 * if enabled or free bus bandwidth allocation if disabled.
1462 * Bus bandwidth is required by mdp.For dsi, it only requires to send
1463 * dcs coammnd. It returns error if bandwidth request fails.
1464 */
1465void mdss_bus_bandwidth_ctrl(int enable)
1466{
1467 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1468 int changed = 0;
1469
1470 mutex_lock(&mdata->bus_lock);
1471 if (enable) {
1472 if (mdata->bus_ref_cnt == 0)
1473 changed++;
1474 mdata->bus_ref_cnt++;
1475 } else {
1476 if (mdata->bus_ref_cnt) {
1477 mdata->bus_ref_cnt--;
1478 if (mdata->bus_ref_cnt == 0)
1479 changed++;
1480 } else {
1481 pr_err("Can not be turned off\n");
1482 }
1483 }
1484
1485 pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
1486 __builtin_return_address(0), current->group_leader->comm,
1487 mdata->bus_ref_cnt, changed, enable);
1488
1489 if (changed) {
1490 MDSS_XLOG(mdata->bus_ref_cnt, enable);
1491
1492 if (!enable) {
1493 if (!mdata->handoff_pending) {
1494 msm_bus_scale_client_update_request(
1495 mdata->bus_hdl, 0);
1496 mdata->ao_bw_uc_idx = 0;
1497 }
1498 pm_runtime_mark_last_busy(&mdata->pdev->dev);
1499 pm_runtime_put_autosuspend(&mdata->pdev->dev);
1500 } else {
1501 pm_runtime_get_sync(&mdata->pdev->dev);
1502 msm_bus_scale_client_update_request(
1503 mdata->bus_hdl, mdata->curr_bw_uc_idx);
1504 }
1505 }
1506
1507 mutex_unlock(&mdata->bus_lock);
1508}
1509EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);
1510
1511void mdss_mdp_clk_ctrl(int enable)
1512{
1513 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
1514 static int mdp_clk_cnt;
1515 unsigned long flags;
1516 int changed = 0;
1517 int rc = 0;
1518
1519 mutex_lock(&mdp_clk_lock);
1520 if (enable) {
1521 if (mdp_clk_cnt == 0)
1522 changed++;
1523 mdp_clk_cnt++;
1524 } else {
1525 if (mdp_clk_cnt) {
1526 mdp_clk_cnt--;
1527 if (mdp_clk_cnt == 0)
1528 changed++;
1529 } else {
1530 pr_err("Can not be turned off\n");
1531 }
1532 }
1533
1534 if (changed)
1535 MDSS_XLOG(mdp_clk_cnt, enable, current->pid);
1536
1537 pr_debug("%pS: task:%s clk_cnt=%d changed=%d enable=%d\n",
1538 __builtin_return_address(0), current->group_leader->comm,
1539 mdata->bus_ref_cnt, changed, enable);
1540
1541 if (changed) {
1542 if (enable) {
1543 pm_runtime_get_sync(&mdata->pdev->dev);
1544
1545 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1546 VOTE_INDEX_LOW);
1547
1548 rc = mdss_iommu_ctrl(1);
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301549 if (IS_ERR_VALUE((unsigned long)rc))
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301550 pr_err("IOMMU attach failed\n");
1551
1552 /* Active+Sleep */
1553 msm_bus_scale_client_update_context(mdata->bus_hdl,
1554 false, mdata->curr_bw_uc_idx);
1555 }
1556
1557 spin_lock_irqsave(&mdp_lock, flags);
1558 mdata->clk_ena = enable;
1559 spin_unlock_irqrestore(&mdp_lock, flags);
1560
1561 mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
1562 mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
1563 mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
1564 mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
1565 if (mdata->vsync_ena)
1566 mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
1567
1568 if (!enable) {
1569 /* release iommu control */
1570 mdss_iommu_ctrl(0);
1571
1572 /* Active-Only */
1573 msm_bus_scale_client_update_context(mdata->bus_hdl,
1574 true, mdata->ao_bw_uc_idx);
1575
1576 mdss_update_reg_bus_vote(mdata->reg_bus_clt,
1577 VOTE_INDEX_DISABLE);
1578
1579 pm_runtime_mark_last_busy(&mdata->pdev->dev);
1580 pm_runtime_put_autosuspend(&mdata->pdev->dev);
1581 }
1582 }
1583
1584 if (enable && changed)
1585 mdss_mdp_idle_pc_restore();
1586
1587 mutex_unlock(&mdp_clk_lock);
1588}
1589
1590static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
1591 char *clk_name, int clk_idx)
1592{
1593 struct clk *tmp;
1594
1595 if (clk_idx >= MDSS_MAX_CLK) {
1596 pr_err("invalid clk index %d\n", clk_idx);
1597 return -EINVAL;
1598 }
1599
1600 tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
1601 if (IS_ERR(tmp)) {
1602 pr_err("unable to get clk: %s\n", clk_name);
1603 return PTR_ERR(tmp);
1604 }
1605
1606 mdata->mdp_clk[clk_idx] = tmp;
1607 return 0;
1608}
1609
1610#define SEC_DEVICE_MDSS 1
1611
1612static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
1613{
1614 int ret, scm_ret = 0;
1615
1616 if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
1617 return;
1618
1619 pr_debug("restoring mdss secure config\n");
1620
1621 __mdss_mdp_reg_access_clk_enable(mdata, true);
1622
1623 ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
1624 if (ret || scm_ret)
1625 pr_warn("scm_restore_sec_cfg failed %d %d\n",
1626 ret, scm_ret);
1627
1628 __mdss_mdp_reg_access_clk_enable(mdata, false);
1629}
1630
1631static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
1632 unsigned long event, void *data)
1633{
1634 struct mdss_data_type *mdata;
1635
1636 mdata = container_of(self, struct mdss_data_type, gdsc_cb);
1637
1638 if (event & REGULATOR_EVENT_ENABLE) {
1639 /*
1640 * As SMMU in low tier targets is not power collapsible,
1641 * hence we don't need to restore sec configuration.
1642 */
1643 if (!mdss_mdp_req_init_restore_cfg(mdata))
1644 __mdss_restore_sec_cfg(mdata);
1645 } else if (event & REGULATOR_EVENT_PRE_DISABLE) {
1646 pr_debug("mdss gdsc is getting disabled\n");
1647 /* halt the vbif transactions */
1648 mdss_mdp_vbif_axi_halt(mdata);
1649 }
1650
1651 return NOTIFY_OK;
1652}
1653
1654static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
1655{
1656 int ret;
1657
1658 ret = of_property_read_u32(mdata->pdev->dev.of_node,
1659 "qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
1660 if (ret) {
1661 pr_err("failed to get max mdp clock rate\n");
1662 return ret;
1663 }
1664
1665 pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);
1666
1667 ret = devm_request_irq(&mdata->pdev->dev, mdss_mdp_hw.irq_info->irq,
Sachin Bhayare3d3767e2018-01-02 21:10:57 +05301668 mdss_irq_handler, 0, "MDSS", mdata);
Sachin Bhayareeeb88892018-01-02 16:36:01 +05301669 if (ret) {
1670 pr_err("mdp request_irq() failed!\n");
1671 return ret;
1672 }
1673 disable_irq(mdss_mdp_hw.irq_info->irq);
1674
1675 mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
1676 if (IS_ERR_OR_NULL(mdata->fs)) {
1677 mdata->fs = NULL;
1678 pr_err("unable to get gdsc regulator\n");
1679 return -EINVAL;
1680 }
1681
1682 mdata->venus = devm_regulator_get_optional(&mdata->pdev->dev,
1683 "gdsc-venus");
1684 if (IS_ERR_OR_NULL(mdata->venus)) {
1685 mdata->venus = NULL;
1686 pr_debug("unable to get venus gdsc regulator\n");
1687 }
1688
1689 mdata->fs_ena = false;
1690
1691 mdata->gdsc_cb.notifier_call = mdss_mdp_gdsc_notifier_call;
1692 mdata->gdsc_cb.priority = 5;
1693 if (regulator_register_notifier(mdata->fs, &(mdata->gdsc_cb)))
1694 pr_warn("GDSC notification registration failed!\n");
1695 else
1696 mdata->regulator_notif_register = true;
1697
1698 mdata->vdd_cx = devm_regulator_get_optional(&mdata->pdev->dev,
1699 "vdd-cx");
1700 if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
1701 pr_debug("unable to get CX reg. rc=%d\n",
1702 PTR_RET(mdata->vdd_cx));
1703 mdata->vdd_cx = NULL;
1704 }
1705
1706 mdata->reg_bus_clt = mdss_reg_bus_vote_client_create("mdp\0");
1707 if (IS_ERR(mdata->reg_bus_clt)) {
1708 pr_err("bus client register failed\n");
1709 return PTR_ERR(mdata->reg_bus_clt);
1710 }
1711
1712 if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
1713 mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
1714 mdss_mdp_irq_clk_register(mdata, "core_clk",
1715 MDSS_CLK_MDP_CORE))
1716 return -EINVAL;
1717
1718 /* lut_clk is not present on all MDSS revisions */
1719 mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT);
1720
1721 /* vsync_clk is optional for non-smart panels */
1722 mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC);
1723
1724 /* Setting the default clock rate to the max supported.*/
1725 mdss_mdp_set_clk_rate(mdata->max_mdp_clk_rate);
1726 pr_debug("mdp clk rate=%ld\n",
1727 mdss_mdp_get_clk_rate(MDSS_CLK_MDP_CORE, false));
1728
1729 return 0;
1730}
1731
1732static void mdss_debug_enable_clock(int on)
1733{
1734 if (on)
1735 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
1736 else
1737 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
1738}
1739
1740static int mdss_mdp_debug_init(struct platform_device *pdev,
1741 struct mdss_data_type *mdata)
1742{
1743 int rc;
1744 struct mdss_debug_base *dbg_blk;
1745
1746 mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock;
1747
1748 rc = mdss_debugfs_init(mdata);
1749 if (rc)
1750 return rc;
1751
1752 rc = mdss_mdp_debugfs_init(mdata);
1753 if (rc) {
1754 mdss_debugfs_remove(mdata);
1755 return rc;
1756 }
1757
1758 mdss_debug_register_io("mdp", &mdata->mdss_io, &dbg_blk);
1759 mdss_debug_register_dump_range(pdev, dbg_blk, "qcom,regs-dump-mdp",
1760 "qcom,regs-dump-names-mdp", "qcom,regs-dump-xin-id-mdp");
1761
1762 if (mdata->vbif_io.base)
1763 mdss_debug_register_io("vbif", &mdata->vbif_io, NULL);
1764 if (mdata->vbif_nrt_io.base)
1765 mdss_debug_register_io("vbif_nrt", &mdata->vbif_nrt_io, NULL);
1766
1767 return 0;
1768}
1769
1770static u32 mdss_get_props(void)
1771{
1772 u32 props = 0;
1773 void __iomem *props_base = ioremap(0xFC4B8114, 4);
1774
1775 if (props_base) {
1776 props = readl_relaxed(props_base);
1777 iounmap(props_base);
1778 }
1779 return props;
1780}
1781
1782void mdss_mdp_init_default_prefill_factors(struct mdss_data_type *mdata)
1783{
1784 mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor = 8;
1785 mdata->prefill_data.prefill_factors.fmt_mt_factor = 4;
1786 mdata->prefill_data.prefill_factors.fmt_linear_factor = 1;
1787 mdata->prefill_data.prefill_factors.scale_factor = 1;
1788 mdata->prefill_data.prefill_factors.xtra_ff_factor = 2;
1789
1790 if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
1791 mdata->prefill_data.ts_threshold = 25;
1792 mdata->prefill_data.ts_end = 8;
1793 mdata->prefill_data.ts_rate.numer = 1;
1794 mdata->prefill_data.ts_rate.denom = 4;
1795 mdata->prefill_data.ts_overhead = 2;
1796 }
1797}
1798
/*
 * Initialize per-revision capability flags, QoS/caps bitmaps and quirks
 * based on mdata->mdp_rev.  Conservative defaults are applied first and
 * then overridden per hardware revision.  Several cases intentionally
 * fall through to share settings with the next revision.
 */
static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
{

	/* conservative defaults; overridden per revision below */
	mdata->per_pipe_ib_factor.numer = 0;
	mdata->per_pipe_ib_factor.denom = 0;
	mdata->apply_post_scale_bytes = true;
	mdata->hflip_buffer_reused = true;
	/* prevent disable of prefill calculations */
	mdata->min_prefill_lines = 0xffff;
	/* clock gating feature is disabled by default */
	mdata->enable_gate = false;
	mdata->pixel_ram_size = 0;
	mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT;

	mdss_mdp_hw_rev_debug_caps_init(mdata);

	switch (mdata->mdp_rev) {
	case MDSS_MDP_HW_REV_107:
		mdss_set_quirk(mdata, MDSS_QUIRK_ROTCDP);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_1:
		mdss_mdp_format_flag_removal(invalid_mdp107_wb_output_fmts,
			ARRAY_SIZE(invalid_mdp107_wb_output_fmts),
			VALID_MDP_WB_INTF_FORMAT);
		/* fall-through */
	case MDSS_MDP_HW_REV_107_2:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 21;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED);
		break;
	case MDSS_MDP_HW_REV_105:
	case MDSS_MDP_HW_REV_109:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		break;
	case MDSS_MDP_HW_REV_110:
		mdss_set_quirk(mdata, MDSS_QUIRK_BWCPANIC);
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdata->min_prefill_lines = 12;
		mdata->props = mdss_get_props();
		break;
	case MDSS_MDP_HW_REV_112:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
		mdata->min_prefill_lines = 12;
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		break;
	case MDSS_MDP_HW_REV_114:
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		/* fall-through */
	case MDSS_MDP_HW_REV_116:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 40 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_115:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 128;
		mdata->min_prefill_lines = 14;
		mdata->has_ubwc = false;
		mdata->pixel_ram_size = 16 * 1024;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		/* disable ECG for 28nm PHY platform */
		mdata->enable_gate = false;
		mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_MIXER_1_FOR_WB, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
		mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
		break;
	case MDSS_MDP_HW_REV_300:
	case MDSS_MDP_HW_REV_301:
		mdata->max_target_zorder = 7; /* excluding base layer */
		mdata->max_cursor_size = 384;
		mdata->per_pipe_ib_factor.numer = 8;
		mdata->per_pipe_ib_factor.denom = 5;
		mdata->apply_post_scale_bytes = false;
		mdata->hflip_buffer_reused = false;
		mdata->min_prefill_lines = 25;
		mdata->has_ubwc = true;
		mdata->pixel_ram_size = 50 * 1024;
		mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;

		set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
		/* NOTE(review): TS_PREFILL is set a second time here (no-op) */
		set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
		set_bit(MDSS_QOS_IB_NOCR, mdata->mdss_qos_map);
		set_bit(MDSS_CAPS_YUV_CONFIG, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
			mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
		set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
		mdss_mdp_init_default_prefill_factors(mdata);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
		mdss_set_quirk(mdata, MDSS_QUIRK_SRC_SPLIT_ALWAYS);
		mdata->has_wb_ubwc = true;
		set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
		break;
	default:
		mdata->max_target_zorder = 4; /* excluding base layer */
		mdata->max_cursor_size = 64;
	}

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_103)
		mdss_set_quirk(mdata, MDSS_QUIRK_DOWNSCALE_HANG);

	if (mdata->mdp_rev < MDSS_MDP_HW_REV_102 ||
			mdata->mdp_rev == MDSS_MDP_HW_REV_200)
		mdss_set_quirk(mdata, MDSS_QUIRK_FMT_PACK_PATTERN);
}
1961
/*
 * Read the hardware revision register (once) and derive per-revision
 * capabilities.  A non-zero mdp_rev means this has already been done.
 * Must be called with the register-access clocks enabled.
 */
static void mdss_hw_rev_init(struct mdss_data_type *mdata)
{
	/* already initialized */
	if (mdata->mdp_rev)
		return;

	mdata->mdp_rev = MDSS_REG_READ(mdata, MDSS_REG_HW_VERSION);
	mdss_mdp_hw_rev_caps_init(mdata);
}
1970
1971/**
1972 * mdss_hw_init() - Initialize MDSS target specific register settings
1973 * @mdata: MDP private data
1974 *
1975 * Initialize basic MDSS hardware settings based on the board specific
1976 * parameters. This function does not explicitly turn on the MDP clocks
1977 * and so it must be called with the MDP clocks already enabled.
1978 */
1979void mdss_hw_init(struct mdss_data_type *mdata)
1980{
1981 struct mdss_mdp_pipe *vig;
1982
1983 mdss_hw_rev_init(mdata);
1984
1985 /* Disable hw underrun recovery only for older mdp reversions. */
1986 if (mdata->mdp_rev < MDSS_MDP_HW_REV_105)
1987 writel_relaxed(0x0, mdata->mdp_base +
1988 MDSS_MDP_REG_VIDEO_INTF_UNDERFLOW_CTL);
1989
1990 if (mdata->hw_settings) {
1991 struct mdss_hw_settings *hws = mdata->hw_settings;
1992
1993 while (hws->reg) {
1994 writel_relaxed(hws->val, hws->reg);
1995 hws++;
1996 }
1997 }
1998
1999 vig = mdata->vig_pipes;
2000
2001 mdata->nmax_concurrent_ad_hw =
2002 (mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
2003
2004 pr_debug("MDP hw init done\n");
2005}
2006
2007static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
2008{
2009 u32 rc = 0;
2010
2011 if (mdata->res_init) {
2012 pr_err("mdss resources already initialized\n");
2013 return -EPERM;
2014 }
2015
2016 mdata->res_init = true;
2017 mdata->clk_ena = false;
2018 mdss_mdp_hw.irq_info->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
2019 mdss_mdp_hw.irq_info->irq_ena = false;
2020
2021 rc = mdss_mdp_irq_clk_setup(mdata);
2022 if (rc)
2023 return rc;
2024
2025 mdata->hist_intr.req = 0;
2026 mdata->hist_intr.curr = 0;
2027 mdata->hist_intr.state = 0;
2028 spin_lock_init(&mdata->hist_intr.lock);
2029
2030 mdata->iclient = msm_ion_client_create(mdata->pdev->name);
2031 if (IS_ERR_OR_NULL(mdata->iclient)) {
2032 pr_err("msm_ion_client_create() return error (%pK)\n",
2033 mdata->iclient);
2034 mdata->iclient = NULL;
2035 }
2036
2037 return rc;
2038}
2039
2040static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
2041 struct device *dev)
2042{
2043 int ret;
2044 struct device_node *node;
2045 u32 prop_val;
2046
2047 if (!dev)
2048 return -EPERM;
2049
2050 node = of_get_child_by_name(dev->of_node, "qcom,mdss-scaler-offsets");
2051 if (!node)
2052 return 0;
2053
2054 if (mdata->scaler_off)
2055 return -EFAULT;
2056
2057 mdata->scaler_off = devm_kzalloc(&mdata->pdev->dev,
2058 sizeof(*mdata->scaler_off), GFP_KERNEL);
2059 if (!mdata->scaler_off)
2060 return -ENOMEM;
2061
2062 ret = of_property_read_u32(node,
2063 "qcom,mdss-vig-scaler-off",
2064 &prop_val);
2065 if (ret) {
2066 pr_err("read property %s failed ret %d\n",
2067 "qcom,mdss-vig-scaler-off", ret);
2068 return -EINVAL;
2069 }
2070 mdata->scaler_off->vig_scaler_off = prop_val;
2071 ret = of_property_read_u32(node,
2072 "qcom,mdss-vig-scaler-lut-off",
2073 &prop_val);
2074 if (ret) {
2075 pr_err("read property %s failed ret %d\n",
2076 "qcom,mdss-vig-scaler-lut-off", ret);
2077 return -EINVAL;
2078 }
2079 mdata->scaler_off->vig_scaler_lut_off = prop_val;
2080 mdata->scaler_off->has_dest_scaler =
2081 of_property_read_bool(mdata->pdev->dev.of_node,
2082 "qcom,mdss-has-dest-scaler");
2083 if (mdata->scaler_off->has_dest_scaler) {
2084 ret = of_property_read_u32(node,
2085 "qcom,mdss-dest-block-off",
2086 &prop_val);
2087 if (ret) {
2088 pr_err("read property %s failed ret %d\n",
2089 "qcom,mdss-dest-block-off", ret);
2090 return -EINVAL;
2091 }
2092 mdata->scaler_off->dest_base = mdata->mdss_io.base +
2093 prop_val;
2094 mdata->scaler_off->ndest_scalers =
2095 mdss_mdp_parse_dt_prop_len(mdata->pdev,
2096 "qcom,mdss-dest-scalers-off");
2097 mdata->scaler_off->dest_scaler_off =
2098 devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
2099 mdata->scaler_off->ndest_scalers,
2100 GFP_KERNEL);
2101 if (!mdata->scaler_off->dest_scaler_off) {
2102 kfree(mdata->scaler_off->dest_scaler_off);
2103 return -ENOMEM;
2104 }
2105 ret = mdss_mdp_parse_dt_handler(mdata->pdev,
2106 "qcom,mdss-dest-scaler-off",
2107 mdata->scaler_off->dest_scaler_off,
2108 mdata->scaler_off->ndest_scalers);
2109 if (ret)
2110 return -EINVAL;
2111 mdata->scaler_off->dest_scaler_lut_off =
2112 devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
2113 mdata->scaler_off->ndest_scalers,
2114 GFP_KERNEL);
2115 if (!mdata->scaler_off->dest_scaler_lut_off) {
2116 kfree(mdata->scaler_off->dest_scaler_lut_off);
2117 return -ENOMEM;
2118 }
2119 ret = mdss_mdp_parse_dt_handler(mdata->pdev,
2120 "qcom,mdss-dest-scalers-lut-off",
2121 mdata->scaler_off->dest_scaler_lut_off,
2122 mdata->scaler_off->ndest_scalers);
2123 if (ret)
2124 return -EINVAL;
2125 }
2126
2127 return 0;
2128}
2129
2130/**
2131 * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen
2132 * @on: 1 to start handoff, 0 to complete the handoff after first frame update
2133 *
2134 * MDSS Clocks and GDSC are already on during continuous splash screen, but
2135 * increasing ref count will keep clocks from being turned off until handoff
2136 * has properly happened after frame update.
2137 */
2138void mdss_mdp_footswitch_ctrl_splash(int on)
2139{
2140 int ret;
2141 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2142
2143 if (mdata != NULL) {
2144 if (on) {
2145 mdata->handoff_pending = true;
2146 pr_debug("Enable MDP FS for splash.\n");
2147 if (mdata->venus) {
2148 ret = regulator_enable(mdata->venus);
2149 if (ret)
2150 pr_err("venus failed to enable\n");
2151 }
2152
2153 ret = regulator_enable(mdata->fs);
2154 if (ret)
2155 pr_err("Footswitch failed to enable\n");
2156
2157 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2158 mdss_bus_bandwidth_ctrl(true);
2159 } else {
2160 pr_debug("Disable MDP FS for splash.\n");
2161 mdss_bus_bandwidth_ctrl(false);
2162 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2163 regulator_disable(mdata->fs);
2164 if (mdata->venus)
2165 regulator_disable(mdata->venus);
2166 mdata->handoff_pending = false;
2167 }
2168 } else {
2169 pr_warn("mdss mdata not initialized\n");
2170 }
2171}
2172
2173static int mdss_mdp_get_pan_intf(const char *pan_intf)
2174{
2175 int i, rc = MDSS_PANEL_INTF_INVALID;
2176
2177 if (!pan_intf)
2178 return rc;
2179
2180 for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
2181 if (!strcmp(pan_intf, pan_types[i].name)) {
2182 rc = pan_types[i].type;
2183 break;
2184 }
2185 }
2186 return rc;
2187}
2188
/*
 * mdss_mdp_get_pan_cfg() - parse the panel override command line string
 * @pan_cfg: panel configuration to fill in
 *
 * Parses mdss_mdp_panel, expected as "<lk_cfg>:<intf>:<panel name>"
 * (e.g. "1:dsi:sim") where lk_cfg is '0' or '1' and the interface token
 * is at most 4 characters.
 *
 * Return: 0 when an interface token and panel name were extracted
 * (pan_intf may still be MDSS_PANEL_INTF_INVALID if the interface name
 * is unknown), -EINVAL when the override is absent or malformed.
 */
static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
{
	char *t = NULL;
	char pan_intf_str[MDSS_MAX_PANEL_LEN];
	int rc, i, panel_len;
	char pan_name[MDSS_MAX_PANEL_LEN] = {'\0'};

	if (!pan_cfg)
		return -EINVAL;

	if (mdss_mdp_panel[0] == '0') {
		pr_debug("panel name is not set\n");
		pan_cfg->lk_cfg = false;
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	} else if (mdss_mdp_panel[0] == '1') {
		pan_cfg->lk_cfg = true;
	} else {
		/* read from dt */
		pan_cfg->lk_cfg = true;
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* skip lk cfg and delimiter; ex: "1:" */
	strlcpy(pan_name, &mdss_mdp_panel[2], MDSS_MAX_PANEL_LEN);
	t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
	if (!t) {
		pr_err("pan_name=[%s] invalid\n", pan_name);
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	/* copy at most 4 chars of the interface token (e.g. "dsi") */
	for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
		pan_intf_str[i] = *(pan_name + i);
	pan_intf_str[i] = 0;
	pr_debug("%d panel intf %s\n", __LINE__, pan_intf_str);
	/* point to the start of panel name */
	t = t + 1;
	strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
	pr_debug("%d: t=[%s] panel name=[%s]\n", __LINE__,
		t, pan_cfg->arg_cfg);

	panel_len = strlen(pan_cfg->arg_cfg);
	if (!panel_len) {
		pr_err("Panel name is invalid\n");
		pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
		return -EINVAL;
	}

	rc = mdss_mdp_get_pan_intf(pan_intf_str);
	pan_cfg->pan_intf = (rc < 0) ? MDSS_PANEL_INTF_INVALID : rc;
	return 0;
}
2243
2244static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev)
2245{
2246 int rc;
2247 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2248 const char *prim_intf = NULL;
2249
2250 rc = of_property_read_string(pdev->dev.of_node,
2251 "qcom,mdss-pref-prim-intf", &prim_intf);
2252 if (rc)
2253 return -ENODEV;
2254
2255 rc = mdss_mdp_get_pan_intf(prim_intf);
2256 if (rc < 0) {
2257 mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
2258 } else {
2259 mdata->pan_cfg.pan_intf = rc;
2260 rc = 0;
2261 }
2262 return rc;
2263}
2264
2265static int mdss_mdp_get_cmdline_config(struct platform_device *pdev)
2266{
2267 int rc, len = 0;
2268 int *intf_type;
2269 char *panel_name;
2270 struct mdss_panel_cfg *pan_cfg;
2271 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2272
2273 mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
2274 pan_cfg = &mdata->pan_cfg;
2275 panel_name = &pan_cfg->arg_cfg[0];
2276 intf_type = &pan_cfg->pan_intf;
2277
2278 /* reads from dt by default */
2279 pan_cfg->lk_cfg = true;
2280
2281 len = strlen(mdss_mdp_panel);
2282
2283 if (len > 0) {
2284 rc = mdss_mdp_get_pan_cfg(pan_cfg);
2285 if (!rc) {
2286 pan_cfg->init_done = true;
2287 return rc;
2288 }
2289 }
2290
2291 rc = mdss_mdp_parse_dt_pan_intf(pdev);
2292 /* if pref pan intf is not present */
2293 if (rc)
2294 pr_warn("unable to parse device tree for pan intf\n");
2295
2296 pan_cfg->init_done = true;
2297
2298 return 0;
2299}
2300
/*
 * __update_sspp_info() - append one line of capability info per SSPP
 * @pipe: first pipe of the group (may be NULL, in which case nothing
 *        is printed)
 * @pipe_cnt: number of SSPPs in the group
 * @type: pipe type label ("vig", "rgb", ...)
 * @buf: output buffer (one PAGE_SIZE sysfs page)
 * @cnt: running byte count into @buf, updated in place
 */
static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
		int pipe_cnt, char *type, char *buf, int *cnt)
{
	int i;
	int j;
	size_t len = PAGE_SIZE;
	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

#define SPRINT(fmt, ...) \
	(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))

	for (i = 0; i < pipe_cnt && pipe; i++) {
		SPRINT("pipe_num:%d pipe_type:%s pipe_ndx:%d rects:%d pipe_is_handoff:%d display_id:%d ",
			pipe->num, type, pipe->ndx, pipe->multirect.max_rects,
			pipe->is_handed_off, mdss_mdp_get_display_id(pipe));
		SPRINT("fmts_supported:");
		/* supported_formats is a bitmap, dumped one byte at a time */
		for (j = 0; j < num_bytes; j++)
			SPRINT("%d,", pipe->supported_formats[j]);
		SPRINT("\n");
		/* multirect pipes occupy max_rects consecutive entries */
		pipe += pipe->multirect.max_rects;
	}
#undef SPRINT
}
2324
2325static void mdss_mdp_update_sspp_info(struct mdss_data_type *mdata,
2326 char *buf, int *cnt)
2327{
2328 __update_sspp_info(mdata->vig_pipes, mdata->nvig_pipes,
2329 "vig", buf, cnt);
2330 __update_sspp_info(mdata->rgb_pipes, mdata->nrgb_pipes,
2331 "rgb", buf, cnt);
2332 __update_sspp_info(mdata->dma_pipes, mdata->ndma_pipes,
2333 "dma", buf, cnt);
2334 __update_sspp_info(mdata->cursor_pipes, mdata->ncursor_pipes,
2335 "cursor", buf, cnt);
2336}
2337
/*
 * mdss_mdp_update_wb_info() - append writeback/rotator format lists
 * @mdata: mdss global data (mdata->wb may be NULL; loops then no-op)
 * @buf: output buffer (one PAGE_SIZE sysfs page)
 * @cnt: running byte count into @buf, updated in place
 */
static void mdss_mdp_update_wb_info(struct mdss_data_type *mdata,
		char *buf, int *cnt)
{
#define SPRINT(fmt, ...) \
	(*cnt += scnprintf(buf + *cnt, len - *cnt, fmt, ##__VA_ARGS__))
	size_t len = PAGE_SIZE;
	int i;
	int num_bytes = BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1);

	SPRINT("rot_input_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	/*
	 * NOTE(review): rot_output_fmts also dumps supported_input_formats.
	 * This looks like a copy/paste slip -- confirm whether a separate
	 * rotator-output format bitmap exists before changing it.
	 */
	SPRINT("\nrot_output_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_input_formats[i]);
	SPRINT("\nwb_output_fmts=");
	for (i = 0; i < num_bytes && mdata->wb; i++)
		SPRINT("%d ", mdata->wb->supported_output_formats[i]);
	SPRINT("\n");
#undef SPRINT
}
2359
/*
 * mdss_mdp_show_capabilities() - sysfs "caps" show handler
 *
 * Serializes the MDP hardware capabilities (revision, pipe inventory,
 * SMP configuration, bandwidth/clock limits and feature flags) into
 * @buf for userspace consumption.
 *
 * Return: number of bytes written to @buf.
 */
ssize_t mdss_mdp_show_capabilities(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	size_t len = PAGE_SIZE;
	int cnt = 0;

#define SPRINT(fmt, ...) \
	(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	SPRINT("mdp_version=5\n");
	SPRINT("hw_rev=%d\n", mdata->mdp_rev);
	SPRINT("pipe_count:%d\n", mdata->nvig_pipes + mdata->nrgb_pipes +
		mdata->ndma_pipes + mdata->ncursor_pipes);
	mdss_mdp_update_sspp_info(mdata, buf, &cnt);
	mdss_mdp_update_wb_info(mdata, buf, &cnt);
	/* TODO : need to remove num pipes info */
	SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
	SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
	SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
	SPRINT("blending_stages=%d\n", mdata->max_target_zorder);
	SPRINT("cursor_pipes=%d\n", mdata->ncursor_pipes);
	SPRINT("max_cursor_size=%d\n", mdata->max_cursor_size);
	SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
	SPRINT("smp_size=%d\n", mdata->smp_mb_size);
	SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe);
	SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
	SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);

	if (mdata->nwb)
		SPRINT("wb_intf_index=%d\n", mdata->nwb - 1);

	/* Prefill factors only apply with the simplified prefill model. */
	if (test_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("fmt_mt_nv12_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_nv12_factor);
		SPRINT("fmt_mt_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_mt_factor);
		SPRINT("fmt_linear_factor=%d\n",
			mdata->prefill_data.prefill_factors.fmt_linear_factor);
		SPRINT("scale_factor=%d\n",
			mdata->prefill_data.prefill_factors.scale_factor);
		SPRINT("xtra_ff_factor=%d\n",
			mdata->prefill_data.prefill_factors.xtra_ff_factor);
	}

	if (test_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map)) {
		SPRINT("amortizable_threshold=%d\n",
			mdata->prefill_data.ts_threshold);
		SPRINT("system_overhead_lines=%d\n",
			mdata->prefill_data.ts_overhead);
	}

	/* Optional limits: emitted only when configured (non-zero). */
	if (mdata->props)
		SPRINT("props=%d\n", mdata->props);
	if (mdata->max_bw_low)
		SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low);
	if (mdata->max_bw_high)
		SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high);
	if (mdata->max_pipe_width)
		SPRINT("max_pipe_width=%d\n", mdata->max_pipe_width);
	if (mdata->max_mixer_width)
		SPRINT("max_mixer_width=%d\n", mdata->max_mixer_width);
	if (mdata->max_bw_per_pipe)
		SPRINT("max_pipe_bw=%u\n", mdata->max_bw_per_pipe);
	if (mdata->max_mdp_clk_rate)
		SPRINT("max_mdp_clk=%u\n", mdata->max_mdp_clk_rate);
	if (mdata->clk_factor.numer)
		SPRINT("clk_fudge_factor=%u,%u\n", mdata->clk_factor.numer,
			mdata->clk_factor.denom);
	if (mdata->has_rot_dwnscale) {
		if (mdata->rot_dwnscale_min)
			SPRINT("rot_dwnscale_min=%u\n",
				mdata->rot_dwnscale_min);
		if (mdata->rot_dwnscale_max)
			SPRINT("rot_dwnscale_max=%u\n",
				mdata->rot_dwnscale_max);
	}
	/* Space-separated feature flag list. */
	SPRINT("features=");
	if (mdata->has_bwc)
		SPRINT(" bwc");
	if (mdata->has_ubwc)
		SPRINT(" ubwc");
	if (mdata->has_wb_ubwc)
		SPRINT(" wb_ubwc");
	if (mdata->has_decimation)
		SPRINT(" decimation");
	if (mdata->highest_bank_bit && !mdss_mdp_is_ubwc_supported(mdata))
		SPRINT(" tile_format");
	if (mdata->has_non_scalar_rgb)
		SPRINT(" non_scalar_rgb");
	if (mdata->has_src_split)
		SPRINT(" src_split");
	if (mdata->has_rot_dwnscale)
		SPRINT(" rotator_downscale");
	if (mdata->max_bw_settings_cnt)
		SPRINT(" dynamic_bw_limit");
	if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
		SPRINT(" qseed3");
	if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map))
		SPRINT(" dest_scaler");
	if (mdata->has_separate_rotator)
		SPRINT(" separate_rotator");
	if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED))
		SPRINT(" hdr");
	SPRINT("\n");
#undef SPRINT

	return cnt;
}
2469
2470static ssize_t mdss_mdp_read_max_limit_bw(struct device *dev,
2471 struct device_attribute *attr, char *buf)
2472{
2473 struct mdss_data_type *mdata = dev_get_drvdata(dev);
2474 size_t len = PAGE_SIZE;
2475 u32 cnt = 0;
2476 int i;
2477
2478 char bw_names[4][8] = {"default", "camera", "hflip", "vflip"};
2479 char pipe_bw_names[4][16] = {"default_pipe", "camera_pipe",
2480 "hflip_pipe", "vflip_pipe"};
2481 struct mdss_max_bw_settings *bw_settings;
2482 struct mdss_max_bw_settings *pipe_bw_settings;
2483
2484 bw_settings = mdata->max_bw_settings;
2485 pipe_bw_settings = mdata->max_per_pipe_bw_settings;
2486
2487#define SPRINT(fmt, ...) \
2488 (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
2489
2490 SPRINT("bw_mode_bitmap=%d\n", mdata->bw_mode_bitmap);
2491 SPRINT("bw_limit_pending=%d\n", mdata->bw_limit_pending);
2492
2493 for (i = 0; i < mdata->max_bw_settings_cnt; i++) {
2494 SPRINT("%s=%d\n", bw_names[i], bw_settings->mdss_max_bw_val);
2495 bw_settings++;
2496 }
2497
2498 for (i = 0; i < mdata->mdss_per_pipe_bw_cnt; i++) {
2499 SPRINT("%s=%d\n", pipe_bw_names[i],
2500 pipe_bw_settings->mdss_max_bw_val);
2501 pipe_bw_settings++;
2502 }
2503
2504 return cnt;
2505}
2506
2507static ssize_t mdss_mdp_store_max_limit_bw(struct device *dev,
2508 struct device_attribute *attr, const char *buf, size_t len)
2509{
2510 struct mdss_data_type *mdata = dev_get_drvdata(dev);
2511 u32 data = 0;
2512
2513 if (kstrtouint(buf, 0, &data)) {
2514 pr_info("Not able scan to bw_mode_bitmap\n");
2515 } else {
2516 mdata->bw_mode_bitmap = data;
2517 mdata->bw_limit_pending = true;
2518 pr_debug("limit use case, bw_mode_bitmap = %d\n", data);
2519 }
2520
2521 return len;
2522}
2523
/* Read-only hardware capability dump. */
static DEVICE_ATTR(caps, 0444, mdss_mdp_show_capabilities, NULL);
/* Read/write bandwidth-limit mode bitmap. */
static DEVICE_ATTR(bw_mode_bitmap, 0664,
	mdss_mdp_read_max_limit_bw, mdss_mdp_store_max_limit_bw);

static struct attribute *mdp_fs_attrs[] = {
	&dev_attr_caps.attr,
	&dev_attr_bw_mode_bitmap.attr,
	NULL
};

/* Attribute group registered on the mdp platform device. */
static struct attribute_group mdp_fs_attr_group = {
	.attrs = mdp_fs_attrs
};
2537
2538static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata)
2539{
2540 struct device *dev = &mdata->pdev->dev;
2541 int rc;
2542
2543 rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group);
2544
2545 return rc;
2546}
2547
2548int mdss_panel_get_intf_status(u32 disp_num, u32 intf_type)
2549{
2550 int rc, intf_status = 0;
2551 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
2552
2553 if (!mdss_res || !mdss_res->pan_cfg.init_done)
2554 return -EPROBE_DEFER;
2555
2556 if (mdss_res->handoff_pending) {
2557 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
2558 intf_status = readl_relaxed(mdata->mdp_base +
2559 MDSS_MDP_REG_DISP_INTF_SEL);
2560 mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
2561 if (intf_type == MDSS_PANEL_INTF_DSI) {
2562 if (disp_num == DISPLAY_1)
2563 rc = (intf_status & MDSS_MDP_INTF_DSI0_SEL);
2564 else if (disp_num == DISPLAY_2)
2565 rc = (intf_status & MDSS_MDP_INTF_DSI1_SEL);
2566 else
2567 rc = 0;
2568 } else if (intf_type == MDSS_PANEL_INTF_EDP) {
2569 intf_status &= MDSS_MDP_INTF_EDP_SEL;
2570 rc = (intf_status == MDSS_MDP_INTF_EDP_SEL);
2571 } else if (intf_type == MDSS_PANEL_INTF_HDMI) {
2572 intf_status &= MDSS_MDP_INTF_HDMI_SEL;
2573 rc = (intf_status == MDSS_MDP_INTF_HDMI_SEL);
2574 } else {
2575 rc = 0;
2576 }
2577 } else {
2578 rc = 0;
2579 }
2580
2581 return rc;
2582}
2583
/*
 * mdss_mdp_probe() - MDSS MDP platform driver probe
 *
 * Maps the MDP/VBIF register regions, registers IRQs, bus scaling and
 * sysfs nodes, parses the device tree, and decides -- by reading
 * DISP_INTF_SEL -- whether the bootloader left displays running, in
 * which case splash handoff votes are kept until the first frame update.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int mdss_mdp_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc;
	struct mdss_data_type *mdata;
	uint32_t intf_sel = 0;
	uint32_t split_display = 0;
	int num_of_display_on = 0;
	int i = 0;

	/* This driver only supports device-tree based platforms. */
	if (!pdev->dev.of_node) {
		pr_err("MDP driver only supports device tree probe\n");
		return -ENOTSUPP;
	}

	/* Single MDP instance: mdss_res doubles as the "probed" guard. */
	if (mdss_res) {
		pr_err("MDP already initialized\n");
		return -EINVAL;
	}

	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
	if (mdata == NULL)
		return -ENOMEM;

	pdev->id = 0;
	mdata->pdev = pdev;
	platform_set_drvdata(pdev, mdata);
	mdss_res = mdata;
	mutex_init(&mdata->reg_lock);
	mutex_init(&mdata->reg_bus_lock);
	mutex_init(&mdata->bus_lock);
	INIT_LIST_HEAD(&mdata->reg_bus_clist);
	atomic_set(&mdata->sd_client_count, 0);
	atomic_set(&mdata->active_intf_cnt, 0);

	mdss_res->mdss_util = mdss_get_util_intf();
	if (mdss_res->mdss_util == NULL) {
		pr_err("Failed to get mdss utility functions\n");
		return -ENODEV;
	}

	/* Publish MDP helpers through the shared utility interface. */
	mdss_res->mdss_util->get_iommu_domain = mdss_smmu_get_domain_id;
	mdss_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
	mdss_res->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
	mdss_res->mdss_util->bus_scale_set_quota = mdss_bus_scale_set_quota;
	mdss_res->mdss_util->bus_bandwidth_ctrl = mdss_bus_bandwidth_ctrl;
	mdss_res->mdss_util->panel_intf_type = mdss_panel_intf_type;
	mdss_res->mdss_util->panel_intf_status = mdss_panel_get_intf_status;

	/* Map MDP, VBIF and the optional non-realtime VBIF regions. */
	rc = msm_mdss_ioremap_byname(pdev, &mdata->mdss_io, "mdp_phys");
	if (rc) {
		pr_err("unable to map MDP base\n");
		goto probe_done;
	}
	pr_debug("MDSS HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->mdss_io.base,
		mdata->mdss_io.len);

	rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_io, "vbif_phys");
	if (rc) {
		pr_err("unable to map MDSS VBIF base\n");
		goto probe_done;
	}
	pr_debug("MDSS VBIF HW Base addr=0x%x len=0x%x\n",
		(int) (unsigned long) mdata->vbif_io.base,
		mdata->vbif_io.len);

	rc = msm_mdss_ioremap_byname(pdev, &mdata->vbif_nrt_io,
		"vbif_nrt_phys");
	if (rc)
		pr_debug("unable to map MDSS VBIF non-realtime base\n");
	else
		pr_debug("MDSS VBIF NRT HW Base addr=%pK len=0x%x\n",
			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("unable to get MDSS irq\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	/*
	 * NOTE(review): the direct "return -ENOMEM" exits in this function
	 * bypass the probe_done cleanup path -- confirm that is intended.
	 */
	mdss_mdp_hw.irq_info = kcalloc(1, sizeof(struct irq_info), GFP_KERNEL);
	if (!mdss_mdp_hw.irq_info)
		return -ENOMEM;

	mdss_mdp_hw.irq_info->irq = res->start;
	mdss_mdp_hw.ptr = mdata;

	/* export misc. interrupts to external driver */
	mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
		&mdss_irq_domain_ops, mdata);
	if (!mdata->irq_domain) {
		pr_err("unable to add linear domain\n");
		rc = -ENOMEM;
		goto probe_done;
	}

	mdss_misc_hw.irq_info = mdss_intr_line();
	rc = mdss_res->mdss_util->register_irq(&mdss_misc_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_mdp_res_init(mdata);
	if (rc) {
		pr_err("unable to initialize mdss mdp resources\n");
		goto probe_done;
	}

	/* Runtime PM: autosuspend only when idle power collapse is on. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
	if (mdata->idle_pc_enabled)
		pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		mdss_mdp_footswitch_ctrl(mdata, true);

	rc = mdss_mdp_bus_scale_register(mdata);
	if (rc) {
		pr_err("unable to register bus scaling\n");
		goto probe_done;
	}

	/*
	 * enable clocks and read mdp_rev as soon as possible once
	 * kernel is up.
	 */
	mdss_mdp_footswitch_ctrl_splash(true);
	mdss_hw_rev_init(mdata);

	/*populate hw iomem base info from device tree*/
	rc = mdss_mdp_parse_dt(pdev);
	if (rc) {
		pr_err("unable to parse device tree\n");
		goto probe_done;
	}

	rc = mdss_mdp_get_cmdline_config(pdev);
	if (rc) {
		pr_err("Error in panel override:rc=[%d]\n", rc);
		goto probe_done;
	}

	rc = mdss_mdp_debug_init(pdev, mdata);
	if (rc) {
		pr_err("unable to initialize mdp debugging\n");
		goto probe_done;
	}
	rc = mdss_mdp_scaler_init(mdata, &pdev->dev);
	if (rc)
		goto probe_done;

	rc = mdss_mdp_register_sysfs(mdata);
	if (rc)
		pr_err("unable to register mdp sysfs nodes\n");

	rc = mdss_fb_register_mdp_instance(&mdp5);
	if (rc)
		pr_err("unable to register mdp instance\n");

	rc = mdss_res->mdss_util->register_irq(&mdss_mdp_hw);
	if (rc)
		pr_err("mdss_register_irq failed.\n");

	rc = mdss_smmu_init(mdata, &pdev->dev);
	if (rc)
		pr_err("mdss smmu init failed\n");

	mdss_mdp_set_supported_formats(mdata);

	mdss_res->mdss_util->mdp_probe_done = true;

	mdss_hw_init(mdata);

	rc = mdss_mdp_pp_init(&pdev->dev);
	if (rc)
		pr_err("unable to initialize mdss pp resources\n");

	/* Restoring Secure configuration during boot-up */
	if (mdss_mdp_req_init_restore_cfg(mdata))
		__mdss_restore_sec_cfg(mdata);

	/* Save reset values of the panic/robust LUTs for later restore. */
	if (mdss_has_quirk(mdata, MDSS_QUIRK_BWCPANIC)) {
		mdata->default_panic_lut0 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT0);
		mdata->default_panic_lut1 = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_PANIC_LUT1);
		mdata->default_robust_lut = readl_relaxed(mdata->mdp_base +
			MMSS_MDP_ROBUST_LUT);
	}

	/*
	 * Read the DISP_INTF_SEL register to check if display was enabled in
	 * bootloader or not. If yes, let handoff handle removing the extra
	 * clk/regulator votes else turn off clk/regulators because purpose
	 * here is to get mdp_rev.
	 */
	intf_sel = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	split_display = readl_relaxed(mdata->mdp_base +
		MDSS_MDP_REG_SPLIT_DISPLAY_EN);
	mdata->splash_intf_sel = intf_sel;
	mdata->splash_split_disp = split_display;

	if (intf_sel != 0) {
		/* each byte of DISP_INTF_SEL selects one interface */
		for (i = 0; i < 4; i++)
			if ((intf_sel >> i*8) & 0x000000FF)
				num_of_display_on++;

		/*
		 * For split display enabled - DSI0, DSI1 interfaces are
		 * considered as single display. So decrement
		 * 'num_of_display_on' by 1
		 */
		if (split_display)
			num_of_display_on--;
	}
	if (!num_of_display_on) {
		/* Nothing handed off: drop the bootstrap votes entirely. */
		mdss_mdp_footswitch_ctrl_splash(false);
		msm_bus_scale_client_update_request(
				mdata->bus_hdl, 0);
		mdata->ao_bw_uc_idx = 0;
	} else {
		mdata->handoff_pending = true;
		/*
		 * If multiple displays are enabled in LK, ctrl_splash off will
		 * be called multiple times during splash_cleanup. Need to
		 * enable it symmetrically
		 */
		for (i = 1; i < num_of_display_on; i++)
			mdss_mdp_footswitch_ctrl_splash(true);
	}

	mdp_intr_cb = kcalloc(ARRAY_SIZE(mdp_irq_map),
			sizeof(struct intr_callback), GFP_KERNEL);
	if (mdp_intr_cb == NULL)
		return -ENOMEM;

	mdss_res->mdp_irq_mask = kcalloc(ARRAY_SIZE(mdp_intr_reg),
			sizeof(u32), GFP_KERNEL);
	if (mdss_res->mdp_irq_mask == NULL)
		return -ENOMEM;

	pr_info("mdss version = 0x%x, bootloader display is %s, num %d, intf_sel=0x%08x\n",
		mdata->mdp_rev, num_of_display_on ? "on" : "off",
		num_of_display_on, intf_sel);

probe_done:
	if (IS_ERR_VALUE((unsigned long)rc)) {
		/* Unwind the bootstrap state taken earlier in probe. */
		if (!num_of_display_on)
			mdss_mdp_footswitch_ctrl_splash(false);

		if (mdata->regulator_notif_register)
			regulator_unregister_notifier(mdata->fs,
				&(mdata->gdsc_cb));
		mdss_mdp_hw.ptr = NULL;
		mdss_mdp_pp_term(&pdev->dev);
		mutex_destroy(&mdata->reg_lock);
		mdss_res = NULL;
	}

	return rc;
}
2847
Sachin Bhayare5076e252018-01-18 14:56:45 +05302848static void mdss_mdp_parse_dt_regs_array(const u32 *arr,
2849 struct mdss_io_data *io, struct mdss_hw_settings *hws, int count)
Sachin Bhayareeeb88892018-01-02 16:36:01 +05302850{
2851 u32 len, reg;
2852 int i;
2853
2854 if (!arr)
2855 return;
2856
2857 for (i = 0, len = count * 2; i < len; i += 2) {
2858 reg = be32_to_cpu(arr[i]);
2859 if (reg >= io->len)
2860 continue;
2861
2862 hws->reg = io->base + reg;
2863 hws->val = be32_to_cpu(arr[i + 1]);
2864 pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
2865 hws++;
2866 }
2867}
2868
2869int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
2870{
2871 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2872 struct mdss_hw_settings *hws;
2873 const u32 *vbif_arr, *mdp_arr, *vbif_nrt_arr;
2874 int vbif_len, mdp_len, vbif_nrt_len;
2875
2876 vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
2877 &vbif_len);
2878 if (!vbif_arr || (vbif_len & 1)) {
2879 pr_debug("MDSS VBIF settings not found\n");
2880 vbif_len = 0;
2881 }
2882 vbif_len /= 2 * sizeof(u32);
2883
2884 vbif_nrt_arr = of_get_property(pdev->dev.of_node,
2885 "qcom,vbif-nrt-settings", &vbif_nrt_len);
2886 if (!vbif_nrt_arr || (vbif_nrt_len & 1)) {
2887 pr_debug("MDSS VBIF non-realtime settings not found\n");
2888 vbif_nrt_len = 0;
2889 }
2890 vbif_nrt_len /= 2 * sizeof(u32);
2891
2892 mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
2893 &mdp_len);
2894 if (!mdp_arr || (mdp_len & 1)) {
2895 pr_debug("MDSS MDP settings not found\n");
2896 mdp_len = 0;
2897 }
2898 mdp_len /= 2 * sizeof(u32);
2899
2900 if (!(mdp_len + vbif_len + vbif_nrt_len))
2901 return 0;
2902
2903 hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len +
2904 vbif_nrt_len + 1), GFP_KERNEL);
2905 if (!hws)
2906 return -ENOMEM;
2907
2908 mdss_mdp_parse_dt_regs_array(vbif_arr, &mdata->vbif_io,
2909 hws, vbif_len);
2910 mdss_mdp_parse_dt_regs_array(vbif_nrt_arr, &mdata->vbif_nrt_io,
2911 hws, vbif_nrt_len);
2912 mdss_mdp_parse_dt_regs_array(mdp_arr, &mdata->mdss_io,
2913 hws + vbif_len, mdp_len);
2914
2915 mdata->hw_settings = hws;
2916
2917 return 0;
2918}
2919
2920static int mdss_mdp_parse_dt(struct platform_device *pdev)
2921{
2922 int rc, data;
2923 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
2924
2925 rc = mdss_mdp_parse_dt_hw_settings(pdev);
2926 if (rc) {
2927 pr_err("Error in device tree : hw settings\n");
2928 return rc;
2929 }
2930
2931 rc = mdss_mdp_parse_dt_pipe(pdev);
2932 if (rc) {
2933 pr_err("Error in device tree : pipes\n");
2934 return rc;
2935 }
2936
2937 rc = mdss_mdp_parse_dt_mixer(pdev);
2938 if (rc) {
2939 pr_err("Error in device tree : mixers\n");
2940 return rc;
2941 }
2942
2943 rc = mdss_mdp_parse_dt_misc(pdev);
2944 if (rc) {
2945 pr_err("Error in device tree : misc\n");
2946 return rc;
2947 }
2948
2949 rc = mdss_mdp_parse_dt_wb(pdev);
2950 if (rc) {
2951 pr_err("Error in device tree : wb\n");
2952 return rc;
2953 }
2954
2955 rc = mdss_mdp_parse_dt_ctl(pdev);
2956 if (rc) {
2957 pr_err("Error in device tree : ctl\n");
2958 return rc;
2959 }
2960
2961 rc = mdss_mdp_parse_dt_video_intf(pdev);
2962 if (rc) {
2963 pr_err("Error in device tree : ctl\n");
2964 return rc;
2965 }
2966
2967 rc = mdss_mdp_parse_dt_smp(pdev);
2968 if (rc) {
2969 pr_err("Error in device tree : smp\n");
2970 return rc;
2971 }
2972
2973 rc = mdss_mdp_parse_dt_prefill(pdev);
2974 if (rc) {
2975 pr_err("Error in device tree : prefill\n");
2976 return rc;
2977 }
2978
2979 rc = mdss_mdp_parse_dt_ad_cfg(pdev);
2980 if (rc) {
2981 pr_err("Error in device tree : ad\n");
2982 return rc;
2983 }
2984
2985 rc = mdss_mdp_parse_dt_cdm(pdev);
2986 if (rc)
2987 pr_debug("CDM offset not found in device tree\n");
2988
2989 rc = mdss_mdp_parse_dt_dsc(pdev);
2990 if (rc)
2991 pr_debug("DSC offset not found in device tree\n");
2992
2993 /* Parse the mdp specific register base offset*/
2994 rc = of_property_read_u32(pdev->dev.of_node,
2995 "qcom,mdss-mdp-reg-offset", &data);
2996 if (rc) {
2997 pr_err("Error in device tree : mdp reg base\n");
2998 return rc;
2999 }
3000 mdata->mdp_base = mdata->mdss_io.base + data;
3001 return 0;
3002}
3003
3004static void mdss_mdp_parse_dt_pipe_sw_reset(struct platform_device *pdev,
3005 u32 reg_off, char *prop_name, struct mdss_mdp_pipe *pipe_list,
3006 u32 npipes)
3007{
3008 int len;
3009 const u32 *arr;
3010
3011 arr = of_get_property(pdev->dev.of_node, prop_name, &len);
3012 if (arr) {
3013 int i;
3014
3015 len /= sizeof(u32);
3016 if (len != npipes) {
3017 pr_err("%s: invalid sw_reset entries req:%d found:%d\n",
3018 prop_name, len, npipes);
3019 return;
3020 }
3021
3022 for (i = 0; i < len; i++) {
3023 pipe_list[i].sw_reset.reg_off = reg_off;
3024 pipe_list[i].sw_reset.bit_off = be32_to_cpu(arr[i]);
3025
3026 pr_debug("%s[%d]: sw_reset: reg_off:0x%x bit_off:%d\n",
3027 prop_name, i, reg_off, be32_to_cpu(arr[i]));
3028 }
3029 }
3030}
3031
/*
 * mdss_mdp_parse_dt_pipe_clk_ctrl() - parse per-pipe clock control info
 * @pdev: mdss platform device
 * @prop_name: mandatory DT property holding, per pipe, a triplet of u32
 *             values: clock-control register offset, control bit offset,
 *             and status bit offset
 * @pipe_list: pipe array to annotate
 * @npipes: number of pipes in @pipe_list
 *
 * Return: 0 on success; -EINVAL when the property is missing or the
 * entry count does not match @npipes (partial state is wiped).
 */
static int mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
	char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
{
	int rc = 0, len;
	const u32 *arr;

	arr = of_get_property(pdev->dev.of_node, prop_name, &len);
	if (arr) {
		int i, j;

		len /= sizeof(u32);
		/* i walks the raw u32 array, j indexes pipes */
		for (i = 0, j = 0; i < len; j++) {
			struct mdss_mdp_pipe *pipe = NULL;

			if (j >= npipes) {
				pr_err("invalid clk ctrl enries for prop: %s\n",
					prop_name);
				return -EINVAL;
			}

			pipe = &pipe_list[j];

			pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
			pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);

			/* status register is next in line to ctrl register */
			pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
			pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);

			pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
				prop_name, j, pipe->clk_ctrl.reg_off,
				pipe->clk_ctrl.bit_off);
			pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
				prop_name, j, pipe->clk_status.reg_off,
				pipe->clk_status.bit_off);
		}
		if (j != npipes) {
			/* count mismatch: wipe partially-filled entries */
			pr_err("%s: %d entries found. required %d\n",
				prop_name, j, npipes);
			for (i = 0; i < npipes; i++) {
				memset(&pipe_list[i].clk_ctrl, 0,
					sizeof(pipe_list[i].clk_ctrl));
				memset(&pipe_list[i].clk_status, 0,
					sizeof(pipe_list[i].clk_status));
			}
			rc = -EINVAL;
		}
	} else {
		pr_err("error mandatory property '%s' not found\n", prop_name);
		rc = -EINVAL;
	}

	return rc;
}
3086
3087static void mdss_mdp_parse_dt_pipe_panic_ctrl(struct platform_device *pdev,
3088 char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
3089{
3090 int i, j;
3091 int len;
3092 const u32 *arr;
3093 struct mdss_mdp_pipe *pipe = NULL;
3094
3095 arr = of_get_property(pdev->dev.of_node, prop_name, &len);
3096 if (arr) {
3097 len /= sizeof(u32);
3098 for (i = 0, j = 0; i < len; j++) {
3099 if (j >= npipes) {
3100 pr_err("invalid panic ctrl enries for prop: %s\n",
3101 prop_name);
3102 return;
3103 }
3104
3105 pipe = &pipe_list[j];
3106 pipe->panic_ctrl_ndx = be32_to_cpu(arr[i++]);
3107 }
3108 if (j != npipes)
3109 pr_err("%s: %d entries found. required %d\n",
3110 prop_name, j, npipes);
3111 } else {
3112 pr_debug("panic ctrl enabled but property '%s' not found\n",
3113 prop_name);
3114 }
3115}
3116
/*
 * mdss_mdp_parse_dt_pipe_helper() - parse DT config for one pipe type
 * @pdev: mdss platform device
 * @ptype: pipe type (vig/rgb/dma/cursor) to parse
 * @ptypestr: pipe type name used to build the DT property names
 * @out_plist: on success, receives the allocated pipe array
 * @len: maximum number of pipes of this type to accept
 * @priority_base: starting priority handed to mdss_mdp_pipe_addr_setup()
 *
 * Return: number of pipes parsed (0 when none of this type exist),
 * negative errno on failure.
 */
static int mdss_mdp_parse_dt_pipe_helper(struct platform_device *pdev,
		u32 ptype, char *ptypestr,
		struct mdss_mdp_pipe **out_plist,
		size_t len,
		u8 priority_base)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 offsets[MDSS_MDP_MAX_SSPP];
	u32 ftch_id[MDSS_MDP_MAX_SSPP];
	u32 xin_id[MDSS_MDP_MAX_SSPP];
	u32 pnums[MDSS_MDP_MAX_SSPP];
	struct mdss_mdp_pipe *pipe_list;
	char prop_name[64];
	int i, cnt, rc;
	u32 rects_per_sspp;

	if (!out_plist)
		return -EINVAL;

	/* Collect the hardware pipe numbers belonging to this type. */
	for (i = 0, cnt = 0; i < MDSS_MDP_MAX_SSPP && cnt < len; i++) {
		if (ptype == get_pipe_type_from_num(i)) {
			pnums[cnt] = i;
			cnt++;
		}
	}

	if (cnt < len)
		pr_warn("Invalid %s pipe count: %zu, max supported: %d\n",
				ptypestr, len, cnt);
	if (cnt == 0) {
		*out_plist = NULL;

		return 0;
	}

	/* by default works in single rect mode unless otherwise noted */
	rects_per_sspp = mdata->rects_per_sspp[ptype] ? : 1;

	/* One mdss_mdp_pipe instance per rect, not per SSPP. */
	pipe_list = devm_kzalloc(&pdev->dev,
			(sizeof(struct mdss_mdp_pipe) * cnt * rects_per_sspp),
			GFP_KERNEL);
	if (!pipe_list)
		return -ENOMEM;

	/* Fetch IDs do not apply with pixel RAM or for cursor pipes. */
	if (mdata->has_pixel_ram || (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) {
		for (i = 0; i < cnt; i++)
			ftch_id[i] = -1;
	} else {
		snprintf(prop_name, sizeof(prop_name),
				"qcom,mdss-pipe-%s-fetch-id", ptypestr);
		rc = mdss_mdp_parse_dt_handler(pdev, prop_name, ftch_id,
				cnt);
		if (rc)
			goto parse_fail;
	}

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-xin-id", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, xin_id, cnt);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-off", ptypestr);
	rc = mdss_mdp_parse_dt_handler(pdev, prop_name, offsets, cnt);
	if (rc)
		goto parse_fail;

	rc = mdss_mdp_pipe_addr_setup(mdata, pipe_list, offsets, ftch_id,
			xin_id, ptype, pnums, cnt, rects_per_sspp,
			priority_base);
	if (rc)
		goto parse_fail;

	snprintf(prop_name, sizeof(prop_name),
			"qcom,mdss-pipe-%s-clk-ctrl-offsets", ptypestr);
	rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, prop_name,
			pipe_list, cnt);
	if (rc)
		goto parse_fail;

	*out_plist = pipe_list;

	return cnt;
parse_fail:
	devm_kfree(&pdev->dev, pipe_list);

	return rc;
}
3206
/*
 * mdss_mdp_parse_dt_pipe() - parse all SSPP pipe information from DT
 * @pdev: platform device for the MDSS node
 *
 * Counts and cross-checks pipes per type (vig/rgb/dma/cursor), validates
 * fetch-id and xin-id counts, then delegates the per-type setup to
 * mdss_mdp_parse_dt_pipe_helper().  Afterwards it parses the optional
 * sw-reset maps, panic-ctrl offsets and per-pipe panic LUTs.
 *
 * Return: 0 on success, negative errno on device-tree inconsistencies.
 */
static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
{
	int rc = 0;
	u32 nfids = 0, len, nxids = 0, npipes = 0;
	u32 sw_reset_offset = 0;
	u32 data[4];

	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	/* targets with no SMP data use fixed pixel RAM instead of SMP MMBs */
	mdata->has_pixel_ram = !mdss_mdp_parse_dt_prop_len(pdev,
		"qcom,mdss-smp-data");

	mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-vig-off");
	mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-rgb-off");
	mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-dma-off");
	mdata->ncursor_pipes = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-cursor-off");

	/* cursor pipes excluded: they take no SMP fetch id or xin id below */
	npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;

	/* with SMP-based targets every pipe must have exactly one fetch id */
	if (!mdata->has_pixel_ram) {
		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-vig-fetch-id");
		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-rgb-fetch-id");
		nfids  += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-dma-fetch-id");
		if (npipes != nfids) {
			pr_err("device tree err: unequal number of pipes and smp ids");
			return -EINVAL;
		}
	}

	/* every (non-cursor) pipe must also have exactly one VBIF xin id */
	if (mdata->nvig_pipes)
		nxids += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-vig-xin-id");
	if (mdata->nrgb_pipes)
		nxids += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-rgb-xin-id");
	if (mdata->ndma_pipes)
		nxids += mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pipe-dma-xin-id");
	if (npipes != nxids) {
		pr_err("device tree err: unequal number of pipes and xin ids\n");
		return -EINVAL;
	}

	/*
	 * Per-type setup.  The helper returns the number of pipes actually
	 * set up (which may be lower than the DT count), so the counts in
	 * mdata are refreshed from each return value.  Pipe priorities are
	 * consecutive: vig first, then rgb, then dma.
	 */
	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_VIG, "vig",
			&mdata->vig_pipes, mdata->nvig_pipes, 0);
	if (IS_ERR_VALUE((unsigned long)rc))
		goto parse_fail;
	mdata->nvig_pipes = rc;

	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_RGB, "rgb",
			&mdata->rgb_pipes, mdata->nrgb_pipes,
			mdata->nvig_pipes);
	if (IS_ERR_VALUE((unsigned long)rc))
		goto parse_fail;
	mdata->nrgb_pipes = rc;

	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_DMA, "dma",
			&mdata->dma_pipes, mdata->ndma_pipes,
			mdata->nvig_pipes + mdata->nrgb_pipes);
	if (IS_ERR_VALUE((unsigned long)rc))
		goto parse_fail;
	mdata->ndma_pipes = rc;

	rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_CURSOR,
			"cursor", &mdata->cursor_pipes, mdata->ncursor_pipes,
			0);
	if (IS_ERR_VALUE((unsigned long)rc))
		goto parse_fail;
	mdata->ncursor_pipes = rc;

	/* helpers return positive pipe counts; reset rc for the callers */
	rc = 0;

	/* optional: shared sw-reset register plus per-type bit maps */
	mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-sw-reset-off",
		&sw_reset_offset, 1);
	if (sw_reset_offset) {
		if (mdata->vig_pipes)
			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
				"qcom,mdss-pipe-vig-sw-reset-map",
				mdata->vig_pipes, mdata->nvig_pipes);
		if (mdata->rgb_pipes)
			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
				"qcom,mdss-pipe-rgb-sw-reset-map",
				mdata->rgb_pipes, mdata->nrgb_pipes);
		if (mdata->dma_pipes)
			mdss_mdp_parse_dt_pipe_sw_reset(pdev, sw_reset_offset,
				"qcom,mdss-pipe-dma-sw-reset-map",
				mdata->dma_pipes, mdata->ndma_pipes);
	}

	/* optional: per-pipe panic control indexes */
	mdata->has_panic_ctrl = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-panic-ctrl");
	if (mdata->has_panic_ctrl) {
		if (mdata->vig_pipes)
			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
				"qcom,mdss-pipe-vig-panic-ctrl-offsets",
					mdata->vig_pipes, mdata->nvig_pipes);
		if (mdata->rgb_pipes)
			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
				"qcom,mdss-pipe-rgb-panic-ctrl-offsets",
					mdata->rgb_pipes, mdata->nrgb_pipes);
		if (mdata->dma_pipes)
			mdss_mdp_parse_dt_pipe_panic_ctrl(pdev,
				"qcom,mdss-pipe-dma-panic-ctrl-offsets",
					mdata->dma_pipes, mdata->ndma_pipes);
	}

	/* optional: default panic/robust LUTs (linear and tile variants) */
	len = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-per-pipe-panic-luts");
	if (len != 4) {
		pr_debug("Unable to read per-pipe-panic-luts\n");
	} else {
		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-per-pipe-panic-luts", data, len);
		mdata->default_panic_lut_per_pipe_linear = data[0];
		mdata->default_panic_lut_per_pipe_tile = data[1];
		mdata->default_robust_lut_per_pipe_linear = data[2];
		mdata->default_robust_lut_per_pipe_tile = data[3];
		pr_debug("per pipe panic lut [0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
			data[0], data[1], data[2], data[3]);
	}

parse_fail:
	return rc;
}
3337
/*
 * mdss_mdp_parse_dt_mixer() - parse interface/writeback mixer info from DT
 * @pdev: platform device for the MDSS node
 *
 * Reads the intf/wb mixer, dspp and pingpong register offsets, validates
 * the cross-counts (every intf mixer needs a pingpong; dspp count cannot
 * exceed intf mixers), then registers the mixers.  When a target has no
 * dedicated writeback mixers and no separate rotator, interface mixers
 * are replicated as "virtual" writeback mixers (one per DMA pipe).
 *
 * Return: 0 on success, negative errno on DT or allocation errors.
 */
static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
{

	u32 nmixers, npingpong;
	int rc = 0;
	u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
		*pingpong_offsets = NULL;
	u32 is_virtual_mixer_req = false;

	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-intf-off");
	mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-mixer-wb-off");
	mdata->ndspp = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-dspp-off");
	npingpong = mdss_mdp_parse_dt_prop_len(pdev,
				"qcom,mdss-pingpong-off");
	nmixers = mdata->nmixers_intf + mdata->nmixers_wb;

	/* mandatory property: maximum active area width of a single mixer */
	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-mixer-width", &mdata->max_mixer_width);
	if (rc) {
		pr_err("device tree err: failed to get max mixer width\n");
		return -EINVAL;
	}

	if (mdata->nmixers_intf < mdata->ndspp) {
		pr_err("device tree err: no of dspp are greater than intf mixers\n");
		return -EINVAL;
	}

	if (mdata->nmixers_intf != npingpong) {
		pr_err("device tree err: unequal no of pingpong and intf mixers\n");
		return -EINVAL;
	}

	/* intf and wb mixer offsets share one array: intf first, then wb */
	mixer_offsets = kcalloc(nmixers, sizeof(u32), GFP_KERNEL);
	if (!mixer_offsets)
		return -ENOMEM;

	dspp_offsets = kcalloc(mdata->ndspp, sizeof(u32), GFP_KERNEL);
	if (!dspp_offsets) {
		rc = -ENOMEM;
		goto dspp_alloc_fail;
	}
	pingpong_offsets = kcalloc(npingpong, sizeof(u32), GFP_KERNEL);
	if (!pingpong_offsets) {
		rc = -ENOMEM;
		goto pingpong_alloc_fail;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
		mixer_offsets, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	mdata->has_separate_rotator = of_property_read_bool(pdev->dev.of_node,
			"qcom,mdss-has-separate-rotator");
	if (mdata->nmixers_wb) {
		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
				mixer_offsets + mdata->nmixers_intf,
				mdata->nmixers_wb);
		if (rc)
			goto parse_done;
	} else if (!mdata->has_separate_rotator) {
		/*
		 * If writeback mixers are not available, put the number of
		 * writeback mixers equal to number of DMA pipes so that
		 * later same number of virtual writeback mixers can be
		 * allocated.
		 */
		mdata->nmixers_wb = mdata->ndma_pipes;
		is_virtual_mixer_req = true;
	}

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
		dspp_offsets, mdata->ndspp);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
		pingpong_offsets, npingpong);
	if (rc)
		goto parse_done;

	rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
			dspp_offsets, pingpong_offsets,
			MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
	if (rc)
		goto parse_done;

	if (mdata->nmixers_wb) {
		if (is_virtual_mixer_req) {
			/*
			 * Replicate last interface mixers based on number of
			 * dma pipes available as virtual writeback mixers.
			 */
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf - mdata->ndma_pipes,
					NULL, NULL, MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		} else {
			rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
					mdata->nmixers_intf, NULL, NULL,
					MDSS_MDP_MIXER_TYPE_WRITEBACK,
					mdata->nmixers_wb);
			if (rc)
				goto parse_done;
		}
	}

parse_done:
	kfree(pingpong_offsets);
pingpong_alloc_fail:
	kfree(dspp_offsets);
dspp_alloc_fail:
	kfree(mixer_offsets);

	return rc;
}
3462
3463static int mdss_mdp_cdm_addr_setup(struct mdss_data_type *mdata,
3464 u32 *cdm_offsets, u32 len)
3465{
3466 struct mdss_mdp_cdm *head;
3467 u32 i = 0;
3468
3469 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_cdm) *
3470 len, GFP_KERNEL);
3471 if (!head)
3472 return -ENOMEM;
3473
3474 for (i = 0; i < len; i++) {
3475 head[i].num = i;
3476 head[i].base = (mdata->mdss_io.base) + cdm_offsets[i];
3477 atomic_set(&head[i].kref.refcount, 0);
3478 mutex_init(&head[i].lock);
3479 init_completion(&head[i].free_comp);
3480 pr_debug("%s: cdm off (%d) = %pK\n", __func__, i, head[i].base);
3481 }
3482
3483 mdata->cdm_off = head;
3484 mutex_init(&mdata->cdm_lock);
3485 return 0;
3486}
3487
3488static int mdss_mdp_parse_dt_cdm(struct platform_device *pdev)
3489{
3490 int rc = 0;
3491 u32 *cdm_offsets = NULL;
3492 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3493
3494 mdata->ncdm = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-cdm-off");
3495
3496 if (!mdata->ncdm) {
3497 rc = 0;
3498 pr_debug("%s: No CDM offsets present in DT\n", __func__);
3499 goto end;
3500 }
3501 pr_debug("%s: cdm len == %d\n", __func__, mdata->ncdm);
3502 cdm_offsets = kcalloc(mdata->ncdm, sizeof(u32), GFP_KERNEL);
3503 if (!cdm_offsets) {
3504 rc = -ENOMEM;
3505 mdata->ncdm = 0;
3506 goto end;
3507 }
3508
3509 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-cdm-off", cdm_offsets,
3510 mdata->ncdm);
3511 if (rc) {
3512 pr_err("device tree err: failed to get cdm offsets\n");
3513 goto fail;
3514 }
3515
3516 rc = mdss_mdp_cdm_addr_setup(mdata, cdm_offsets, mdata->ncdm);
3517 if (rc) {
3518 pr_err("%s: CDM address setup failed\n", __func__);
3519 goto fail;
3520 }
3521
3522fail:
3523 kfree(cdm_offsets);
3524 if (rc)
3525 mdata->ncdm = 0;
3526end:
3527 return rc;
3528}
3529
3530static int mdss_mdp_dsc_addr_setup(struct mdss_data_type *mdata,
3531 u32 *dsc_offsets, u32 len)
3532{
3533 struct mdss_mdp_dsc *head;
3534 u32 i = 0;
3535
3536 head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_dsc) *
3537 len, GFP_KERNEL);
3538 if (!head)
3539 return -ENOMEM;
3540
3541 for (i = 0; i < len; i++) {
3542 head[i].num = i;
3543 head[i].base = (mdata->mdss_io.base) + dsc_offsets[i];
3544 pr_debug("dsc off (%d) = %pK\n", i, head[i].base);
3545 }
3546
3547 mdata->dsc_off = head;
3548 return 0;
3549}
3550
3551static int mdss_mdp_parse_dt_dsc(struct platform_device *pdev)
3552{
3553 int rc = 0;
3554 u32 *dsc_offsets = NULL;
3555 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3556
3557 mdata->ndsc = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-dsc-off");
3558 if (!mdata->ndsc) {
3559 rc = 0;
3560 pr_debug("No DSC offsets present in DT\n");
3561 goto end;
3562 }
3563 pr_debug("dsc len == %d\n", mdata->ndsc);
3564
3565 dsc_offsets = kcalloc(mdata->ndsc, sizeof(u32), GFP_KERNEL);
3566 if (!dsc_offsets) {
3567 rc = -ENOMEM;
3568 mdata->ndsc = 0;
3569 goto end;
3570 }
3571
3572 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dsc-off", dsc_offsets,
3573 mdata->ndsc);
3574 if (rc) {
3575 pr_err("device tree err: failed to get cdm offsets\n");
3576 goto fail;
3577 }
3578
3579 rc = mdss_mdp_dsc_addr_setup(mdata, dsc_offsets, mdata->ndsc);
3580 if (rc) {
3581 pr_err("%s: DSC address setup failed\n", __func__);
3582 goto fail;
3583 }
3584
3585fail:
3586 kfree(dsc_offsets);
3587 if (rc)
3588 mdata->ndsc = 0;
3589end:
3590 return rc;
3591}
3592
/*
 * mdss_mdp_parse_dt_wb() - parse writeback block offsets from DT
 * @pdev: platform device for the MDSS node
 *
 * Reads the "qcom,mdss-wb-off" register offsets and registers the
 * writeback interfaces.  Whether a dedicated interface-writeback is
 * available depends on "qcom,mdss-wfd-mode": any value other than
 * "shared" reserves one intf writeback.  On success ownership of the
 * offsets array transfers to mdata (wb_offsets); on failure it is freed
 * here.
 *
 * Return: 0 on success, negative errno on allocation or parse failure.
 */
static int mdss_mdp_parse_dt_wb(struct platform_device *pdev)
{
	int rc = 0;
	u32 *wb_offsets = NULL;
	u32 num_wb_mixer, nwb_offsets, num_intf_wb = 0;
	const char *wfd_data;
	struct mdss_data_type *mdata;

	mdata = platform_get_drvdata(pdev);

	/* writeback count mirrors the (possibly virtual) wb mixer count */
	num_wb_mixer = mdata->nmixers_wb;

	wfd_data = of_get_property(pdev->dev.of_node,
					"qcom,mdss-wfd-mode", NULL);
	if (wfd_data && strcmp(wfd_data, "shared") != 0)
		num_intf_wb = 1;

	nwb_offsets = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-wb-off");

	wb_offsets = kcalloc(nwb_offsets, sizeof(u32), GFP_KERNEL);
	if (!wb_offsets)
		return -ENOMEM;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
		wb_offsets, nwb_offsets);
	if (rc)
		goto wb_parse_done;

	rc = mdss_mdp_wb_addr_setup(mdata, num_wb_mixer, num_intf_wb);
	if (rc)
		goto wb_parse_done;

	/* success: mdata now owns wb_offsets */
	mdata->nwb_offsets = nwb_offsets;
	mdata->wb_offsets = wb_offsets;

	return 0;

wb_parse_done:
	kfree(wb_offsets);
	return rc;
}
3635
3636static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
3637{
3638 int rc = 0;
3639 u32 *ctl_offsets = NULL;
3640
3641 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3642
3643 mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
3644 "qcom,mdss-ctl-off");
3645
3646 if (mdata->nctl < mdata->nwb) {
3647 pr_err("device tree err: number of ctl greater than wb\n");
3648 rc = -EINVAL;
3649 goto parse_done;
3650 }
3651
3652 ctl_offsets = kcalloc(mdata->nctl, sizeof(u32), GFP_KERNEL);
3653 if (!ctl_offsets)
3654 return -ENOMEM;
3655
3656 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
3657 ctl_offsets, mdata->nctl);
3658 if (rc)
3659 goto parse_done;
3660
3661 rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, mdata->nctl);
3662 if (rc)
3663 goto parse_done;
3664
3665parse_done:
3666 kfree(ctl_offsets);
3667
3668 return rc;
3669}
3670
3671static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
3672{
3673 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3674 u32 count;
3675 u32 *offsets;
3676 int rc;
3677
3678
3679 count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
3680 if (count == 0)
3681 return -EINVAL;
3682
3683 offsets = kcalloc(count, sizeof(u32), GFP_KERNEL);
3684 if (!offsets)
3685 return -ENOMEM;
3686
3687 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
3688 offsets, count);
3689 if (rc)
3690 goto parse_fail;
3691
3692 rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
3693 if (rc)
3694 pr_err("unable to setup video interfaces\n");
3695
3696parse_fail:
3697 kfree(offsets);
3698
3699 return rc;
3700}
3701
/*
 * mdss_mdp_update_smp_map() - apply fixed SMP MMB assignments from DT
 * @pdev: platform device for the MDSS node
 * @data: raw big-endian DT array: per pipe a count followed by that many
 *	  MMB indexes
 * @len: length of @data in bytes
 * @pipe_cnt: number of pipes in @pipes
 * @pipes: pipe array whose smp_map[0].fixed bitmaps are populated
 *
 * Marks the listed memory macro blocks as permanently owned by their
 * pipe and records them in the global allocation map so the dynamic SMP
 * allocator never hands them out.  Fixed maps of different pipes must
 * not overlap.
 *
 * Return: 0 on success, -EINVAL on malformed or conflicting data.
 */
static int mdss_mdp_update_smp_map(struct platform_device *pdev,
		const u32 *data, int len, int pipe_cnt,
		struct mdss_mdp_pipe *pipes)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	int i, j, k;
	u32 cnt, mmb;

	len /= sizeof(u32);
	/* i walks the flat DT array, k walks the pipes */
	for (i = 0, k = 0; i < len; k++) {
		struct mdss_mdp_pipe *pipe = NULL;

		if (k >= pipe_cnt) {
			pr_err("invalid fixed mmbs\n");
			return -EINVAL;
		}

		pipe = &pipes[k];

		/* first word per pipe: number of fixed MMBs that follow */
		cnt = be32_to_cpu(data[i++]);
		if (cnt == 0)
			continue;

		for (j = 0; j < cnt; j++) {
			mmb = be32_to_cpu(data[i++]);
			/*
			 * NOTE(review): valid MMB indexes are presumably
			 * 0..smp_mb_cnt-1, so this boundary check may need
			 * to be ">=" rather than ">" — confirm against the
			 * bitmap size and DT numbering convention.
			 */
			if (mmb > mdata->smp_mb_cnt) {
				pr_err("overflow mmb:%d pipe:%d: max:%d\n",
						mmb, k, mdata->smp_mb_cnt);
				return -EINVAL;
			}
			set_bit(mmb, pipe->smp_map[0].fixed);
		}
		/* fixed MMBs must not already belong to another pipe */
		if (bitmap_intersects(pipe->smp_map[0].fixed,
					mdata->mmb_alloc_map,
					mdata->smp_mb_cnt)) {
			pr_err("overlapping fixed mmb map\n");
			return -EINVAL;
		}
		bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed,
				mdata->mmb_alloc_map, mdata->smp_mb_cnt);
	}
	return 0;
}
3745
/*
 * mdss_mdp_parse_dt_smp() - parse shared memory pool (SMP) config from DT
 * @pdev: platform device for the MDSS node
 *
 * "qcom,mdss-smp-data" is a <count, size> pair describing the SMP memory
 * macro blocks.  It is optional for targets with fixed pixel RAM (absent
 * property means success).  After the pool is set up, optional fixed MMB
 * assignments for RGB and VIG pipes are applied.
 *
 * Return: 0 on success, negative errno on malformed mandatory data.
 */
static int mdss_mdp_parse_dt_smp(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 num;
	u32 data[2];
	int rc, len;
	const u32 *arr;

	num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data");
	/*
	 * This property is optional for targets with fix pixel ram. Rest
	 * must provide no. of smp and size of each block.
	 */
	if (!num)
		return 0;
	else if (num != 2)
		return -EINVAL;

	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num);
	if (rc)
		return rc;

	/* data[0] = number of MMBs, data[1] = size of each MMB */
	rc = mdss_mdp_smp_setup(mdata, data[0], data[1]);

	if (rc) {
		pr_err("unable to setup smp data\n");
		return rc;
	}

	/* optional cap on MMBs a single pipe may claim (0 = no cap) */
	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-smp-mb-per-pipe", data);
	mdata->smp_mb_per_pipe = (!rc ? data[0] : 0);

	rc = 0;
	/*
	 * NOTE(review): a failure from the RGB fixed-mmb update below is
	 * only warned about and rc is overwritten by the subsequent VIG
	 * update, so an RGB error can be silently masked — confirm this
	 * best-effort behavior is intended.
	 */
	arr = of_get_property(pdev->dev.of_node,
			"qcom,mdss-pipe-rgb-fixed-mmb", &len);
	if (arr) {
		rc = mdss_mdp_update_smp_map(pdev, arr, len,
				mdata->nrgb_pipes, mdata->rgb_pipes);

		if (rc)
			pr_warn("unable to update smp map for RGB pipes\n");
	}

	arr = of_get_property(pdev->dev.of_node,
			"qcom,mdss-pipe-vig-fixed-mmb", &len);
	if (arr) {
		rc = mdss_mdp_update_smp_map(pdev, arr, len,
				mdata->nvig_pipes, mdata->vig_pipes);

		if (rc)
			pr_warn("unable to update smp map for VIG pipes\n");
	}
	return rc;
}
3801
3802static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev,
3803 char *prop_name, struct mult_factor *ff)
3804{
3805 int rc;
3806 u32 data[2] = {1, 1};
3807
3808 rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2);
3809 if (rc) {
3810 pr_debug("err reading %s\n", prop_name);
3811 } else {
3812 ff->numer = data[0];
3813 ff->denom = data[1];
3814 }
3815}
3816
/*
 * mdss_mdp_parse_dt_prefill() - parse display prefill parameters from DT
 * @pdev: platform device for the MDSS node
 *
 * Fills mdata->prefill_data with the per-stage buffer sizes used for
 * prefill bandwidth calculations.  All properties except the FBC line
 * count are treated as mandatory (their absence fails the probe).
 *
 * Return: 0 on success, errno from the first missing mandatory property.
 */
static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	struct mdss_prefill_data *prefill = &mdata->prefill_data;
	int rc;

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-prefill-outstanding-buffer-bytes",
		&prefill->ot_bytes);
	if (rc) {
		pr_err("prefill outstanding buffer bytes not specified\n");
		return rc;
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes);
	if (rc) {
		pr_err("prefill y buffer bytes not specified\n");
		return rc;
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-prefill-scaler-buffer-lines-bilinear",
		&prefill->y_scaler_lines_bilinear);
	if (rc) {
		pr_err("prefill scaler lines for bilinear not specified\n");
		return rc;
	}

	/*
	 * NOTE(review): this property is logged at debug level yet still
	 * treated as mandatory (rc returned), unlike its siblings which
	 * use pr_err — confirm the intended severity/optionality.
	 */
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-prefill-scaler-buffer-lines-caf",
		&prefill->y_scaler_lines_caf);
	if (rc) {
		pr_debug("prefill scaler lines for caf not specified\n");
		return rc;
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-prefill-post-scaler-buffer-pixels",
		&prefill->post_scaler_pixels);
	if (rc) {
		pr_err("prefill post scaler buffer pixels not specified\n");
		return rc;
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-prefill-pingpong-buffer-pixels",
		&prefill->pp_pixels);
	if (rc) {
		pr_err("prefill pingpong buffer lines not specified\n");
		return rc;
	}

	/* FBC line count is genuinely optional */
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines);
	if (rc)
		pr_debug("prefill FBC lines not specified\n");

	return 0;
}
3877
/*
 * mdss_mdp_parse_vbif_qos() - parse VBIF QoS remapper settings from DT
 * @pdev: platform device for the MDSS node
 *
 * Reads the real-time and non-real-time QoS remap tables.  Each table is
 * only accepted when it has exactly MDSS_VBIF_QOS_REMAP_ENTRIES entries.
 * Everything here is best-effort: failures are logged and the function
 * simply returns (void).
 *
 * NOTE(review): mdata->npriority_lvl is reused for both tables, and a
 * missing/invalid RT table returns early, skipping the NRT parse
 * entirely — confirm that an NRT-only configuration is not expected.
 */
static void mdss_mdp_parse_vbif_qos(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	int rc;

	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-vbif-qos-rt-setting");
	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
		mdata->vbif_rt_qos = kcalloc(mdata->npriority_lvl,
				sizeof(u32), GFP_KERNEL);
		if (!mdata->vbif_rt_qos)
			return;

		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-vbif-qos-rt-setting",
				mdata->vbif_rt_qos, mdata->npriority_lvl);
		if (rc) {
			pr_debug("rt setting not found\n");
			return;
		}
	} else {
		mdata->npriority_lvl = 0;
		pr_debug("Invalid or no vbif qos rt setting\n");
		return;
	}

	mdata->npriority_lvl = mdss_mdp_parse_dt_prop_len(pdev,
			"qcom,mdss-vbif-qos-nrt-setting");
	if (mdata->npriority_lvl == MDSS_VBIF_QOS_REMAP_ENTRIES) {
		mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
				sizeof(u32), GFP_KERNEL);
		if (!mdata->vbif_nrt_qos)
			return;

		rc = mdss_mdp_parse_dt_handler(pdev,
			"qcom,mdss-vbif-qos-nrt-setting", mdata->vbif_nrt_qos,
				mdata->npriority_lvl);
		if (rc) {
			pr_debug("nrt setting not found\n");
			return;
		}
	} else {
		mdata->npriority_lvl = 0;
		pr_debug("Invalid or no vbif qos nrt seting\n");
	}
}
3924
3925static void mdss_mdp_parse_max_bw_array(const u32 *arr,
3926 struct mdss_max_bw_settings *max_bw_settings, int count)
3927{
3928 int i;
3929
3930 for (i = 0; i < count; i++) {
3931 max_bw_settings->mdss_max_bw_mode = be32_to_cpu(arr[i*2]);
3932 max_bw_settings->mdss_max_bw_val = be32_to_cpu(arr[(i*2)+1]);
3933 max_bw_settings++;
3934 }
3935}
3936
3937static void mdss_mdp_parse_max_bandwidth(struct platform_device *pdev)
3938{
3939 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
3940 struct mdss_max_bw_settings *max_bw_settings;
3941 int max_bw_settings_cnt = 0;
3942 const u32 *max_bw;
3943
3944 max_bw = of_get_property(pdev->dev.of_node, "qcom,max-bw-settings",
3945 &max_bw_settings_cnt);
3946
3947 if (!max_bw || !max_bw_settings_cnt) {
3948 pr_debug("MDSS max bandwidth settings not found\n");
3949 return;
3950 }
3951
3952 max_bw_settings_cnt /= 2 * sizeof(u32);
3953
3954 max_bw_settings = devm_kzalloc(&pdev->dev, sizeof(*max_bw_settings)
3955 * max_bw_settings_cnt, GFP_KERNEL);
3956 if (!max_bw_settings)
3957 return;
3958
3959 mdss_mdp_parse_max_bw_array(max_bw, max_bw_settings,
3960 max_bw_settings_cnt);
3961
3962 mdata->max_bw_settings = max_bw_settings;
3963 mdata->max_bw_settings_cnt = max_bw_settings_cnt;
3964}
3965
/*
 * mdss_mdp_parse_per_pipe_bandwidth() - parse per-pipe bandwidth limits
 * @pdev: platform device for the MDSS node
 *
 * "qcom,max-bandwidth-per-pipe-kbps" either holds a single u32 (one
 * common per-pipe cap) or a list of <mode, value> pairs.  For the pair
 * form, the min and max of all values are cached in mdata for quick
 * range checks.  Optional; failures are logged and ignored (void).
 */
static void mdss_mdp_parse_per_pipe_bandwidth(struct platform_device *pdev)
{

	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	struct mdss_max_bw_settings *max_bw_per_pipe_settings;
	int max_bw_settings_cnt = 0;
	const u32 *max_bw_settings;
	u32 max_bw, min_bw, threshold, i = 0;

	max_bw_settings = of_get_property(pdev->dev.of_node,
			"qcom,max-bandwidth-per-pipe-kbps",
			&max_bw_settings_cnt);

	if (!max_bw_settings || !max_bw_settings_cnt) {
		pr_debug("MDSS per pipe max bandwidth settings not found\n");
		return;
	}

	/* Support targets where a common per pipe max bw is provided */
	if ((max_bw_settings_cnt / sizeof(u32)) == 1) {
		mdata->max_bw_per_pipe = be32_to_cpu(max_bw_settings[0]);
		mdata->max_per_pipe_bw_settings = NULL;
		pr_debug("Common per pipe max bandwidth provided\n");
		return;
	}

	/* each entry is a <mode, value> pair of u32s */
	max_bw_settings_cnt /= 2 * sizeof(u32);

	max_bw_per_pipe_settings = devm_kzalloc(&pdev->dev,
		sizeof(struct mdss_max_bw_settings) * max_bw_settings_cnt,
		GFP_KERNEL);
	if (!max_bw_per_pipe_settings) {
		pr_err("Memory allocation failed for max_bw_settings\n");
		return;
	}

	mdss_mdp_parse_max_bw_array(max_bw_settings, max_bw_per_pipe_settings,
					max_bw_settings_cnt);
	mdata->max_per_pipe_bw_settings = max_bw_per_pipe_settings;
	mdata->mdss_per_pipe_bw_cnt = max_bw_settings_cnt;

	/* Calculate min and max allowed per pipe BW */
	min_bw = mdata->max_bw_high;
	max_bw = 0;

	while (i < max_bw_settings_cnt) {
		threshold = mdata->max_per_pipe_bw_settings[i].mdss_max_bw_val;
		if (threshold > max_bw)
			max_bw = threshold;
		if (threshold < min_bw)
			min_bw = threshold;
		++i;
	}
	mdata->max_bw_per_pipe = max_bw;
	mdata->min_bw_per_pipe = min_bw;
}
4022
/*
 * mdss_mdp_parse_dt_misc() - parse miscellaneous MDSS DT properties
 * @pdev: platform device for the MDSS node
 *
 * Reads a collection of mostly-optional target properties into mdata:
 * feature flags (BWC, decimation, source split, ...), WFD mode, bus
 * fudge factors, bandwidth limits, clock levels, rotator downscale
 * range and pingpong-split configuration.  Most properties fall back
 * to documented defaults when absent.
 *
 * Return: 0 on success; negative errno only for the pingpong-split
 * sub-properties and the clock-level allocation, which are mandatory
 * once their parent feature is enabled.
 */
static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
	u32 data, slave_pingpong_off;
	const char *wfd_data;
	int rc;
	struct property *prop = NULL;

	/* rotator block size; defaults to 128 when not specified */
	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
		&data);
	mdata->rot_block_size = (!rc ? data : 128);

	/* default outstanding-transaction limits; 0 = no default limit */
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-rd-limit", &data);
	mdata->default_ot_rd_limit = (!rc ? data : 0);

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-default-ot-wr-limit", &data);
	mdata->default_ot_wr_limit = (!rc ? data : 0);

	/* boolean capability flags */
	mdata->has_non_scalar_rgb = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-non-scalar-rgb");
	mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-bwc");
	mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-decimation");
	mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-lut-read");
	mdata->needs_hist_vote = !(of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-no-hist-vote"));
	/* WFD writeback mode: intf / shared / dedicated (default shared) */
	wfd_data = of_get_property(pdev->dev.of_node,
					"qcom,mdss-wfd-mode", NULL);
	if (wfd_data) {
		pr_debug("wfd mode: %s\n", wfd_data);
		if (!strcmp(wfd_data, "intf")) {
			mdata->wfd_mode = MDSS_MDP_WFD_INTERFACE;
		} else if (!strcmp(wfd_data, "shared")) {
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		} else if (!strcmp(wfd_data, "dedicated")) {
			mdata->wfd_mode = MDSS_MDP_WFD_DEDICATED;
		} else {
			pr_debug("wfd default mode: Shared\n");
			mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
		}
	} else {
		pr_warn("wfd mode not configured. Set to default: Shared\n");
		mdata->wfd_mode = MDSS_MDP_WFD_SHARED;
	}

	mdata->has_src_split = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-source-split");
	mdata->has_fixed_qos_arbiter_enabled =
			of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-has-fixed-qos-arbiter-enabled");
	mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-idle-power-collapse-enabled");

	/* batfet regulator is required only when the supply is declared */
	prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
	mdata->batfet_required = prop ? true : false;
	mdata->en_svs_high = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-en-svs-high");
	if (!mdata->en_svs_high)
		pr_debug("%s: svs_high is not enabled\n", __func__);
	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
	if (rc)
		pr_debug("Could not read optional property: highest bank bit\n");

	/*
	 * pingpong split drives one panel from two pingpongs; it needs the
	 * slave pingpong offset and the ppb offsets to be present.
	 */
	mdata->has_pingpong_split = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-pingpong-split");

	if (mdata->has_pingpong_split) {
		rc = of_property_read_u32(pdev->dev.of_node,
				"qcom,mdss-slave-pingpong-off",
				&slave_pingpong_off);
		if (rc) {
			pr_err("Error in device tree: slave pingpong offset\n");
			return rc;
		}
		mdata->slave_pingpong_base = mdata->mdss_io.base +
			slave_pingpong_off;
		rc = mdss_mdp_parse_dt_ppb_off(pdev);
		if (rc) {
			pr_err("Error in device tree: ppb offset not configured\n");
			return rc;
		}
	}

	/*
	 * 2x factor on AB because bus driver will divide by 2
	 * due to 2x ports to BIMC
	 */
	mdata->ab_factor.numer = 2;
	mdata->ab_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor",
		&mdata->ab_factor);

	/*
	 * 1.2 factor on ib as default value. This value is
	 * experimentally determined and should be tuned in device
	 * tree.
	 */
	mdata->ib_factor.numer = 6;
	mdata->ib_factor.denom = 5;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor",
		&mdata->ib_factor);

	/*
	 * Set overlap ib value equal to ib by default. This value can
	 * be tuned in device tree to be different from ib.
	 * This factor apply when the max bandwidth per pipe
	 * is the overlap BW.
	 */
	mdata->ib_factor_overlap.numer = mdata->ib_factor.numer;
	mdata->ib_factor_overlap.denom = mdata->ib_factor.denom;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap",
		&mdata->ib_factor_overlap);

	/* clock fudge factor: identity unless overridden by DT */
	mdata->clk_factor.numer = 1;
	mdata->clk_factor.denom = 1;
	mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
		&mdata->clk_factor);

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
	if (rc)
		pr_debug("max bandwidth (low) property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,max-bandwidth-high-kbps", &mdata->max_bw_high);
	if (rc)
		pr_debug("max bandwidth (high) property not specified\n");

	mdss_mdp_parse_per_pipe_bandwidth(pdev);

	mdss_mdp_parse_max_bandwidth(pdev);

	/* optional table of supported MDP clock rates */
	mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev,
					"qcom,mdss-clk-levels");

	if (mdata->nclk_lvl) {
		mdata->clock_levels = kcalloc(mdata->nclk_lvl, sizeof(u32),
							GFP_KERNEL);
		if (!mdata->clock_levels)
			return -ENOMEM;

		rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels",
			mdata->clock_levels, mdata->nclk_lvl);
		if (rc)
			pr_debug("clock levels not found\n");
	}

	mdss_mdp_parse_vbif_qos(pdev);
	mdata->traffic_shaper_en = of_property_read_bool(pdev->dev.of_node,
		 "qcom,mdss-traffic-shaper-enabled");
	mdata->has_rot_dwnscale = of_property_read_bool(pdev->dev.of_node,
		"qcom,mdss-has-rotator-downscale");
	if (mdata->has_rot_dwnscale) {
		/* downscale range is expected once the feature flag is set */
		rc = of_property_read_u32(pdev->dev.of_node,
				"qcom,mdss-rot-downscale-min",
				&mdata->rot_dwnscale_min);
		if (rc)
			pr_err("Min rotator downscale property not specified\n");

		rc = of_property_read_u32(pdev->dev.of_node,
				"qcom,mdss-rot-downscale-max",
				&mdata->rot_dwnscale_max);
		if (rc)
			pr_err("Max rotator downscale property not specified\n");
	}

	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,mdss-dram-channels", &mdata->bus_channels);
	if (rc)
		pr_debug("number of channels property not specified\n");

	rc = of_property_read_u32(pdev->dev.of_node,
		 "qcom,max-pipe-width", &mdata->max_pipe_width);
	if (rc) {
		pr_debug("max pipe width not specified. Using default value\n");
		mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH;
	}
	return 0;
}
4207
4208static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev)
4209{
4210 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4211 u32 *ad_offsets = NULL;
4212 int rc;
4213
4214 mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off");
4215
4216 if (mdata->nad_cfgs == 0) {
4217 mdata->ad_cfgs = NULL;
4218 return 0;
4219 }
4220
4221 if (mdata->nad_cfgs > mdata->nmixers_intf)
4222 return -EINVAL;
4223
4224
4225 mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node,
4226 "qcom,mdss-has-wb-ad");
4227
4228 ad_offsets = kcalloc(mdata->nad_cfgs, sizeof(u32), GFP_KERNEL);
4229 if (!ad_offsets)
4230 return -ENOMEM;
4231
4232 rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets,
4233 mdata->nad_cfgs);
4234 if (rc)
4235 goto parse_done;
4236
4237 rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets);
4238 if (rc)
4239 pr_err("unable to setup assertive display\n");
4240
4241parse_done:
4242 kfree(ad_offsets);
4243 return rc;
4244}
4245
4246static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev)
4247{
4248 struct mdss_data_type *mdata = platform_get_drvdata(pdev);
4249 u32 len, index;
4250 const u32 *arr;
4251
4252 arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-ctl-off", &len);
4253 if (arr) {
4254 mdata->nppb_ctl = len / sizeof(u32);
4255 mdata->ppb_ctl = devm_kzalloc(&mdata->pdev->dev,
4256 sizeof(u32) * mdata->nppb_ctl, GFP_KERNEL);
4257
4258 if (mdata->ppb_ctl == NULL)
4259 return -ENOMEM;
4260
4261 for (index = 0; index < mdata->nppb_ctl; index++)
4262 mdata->ppb_ctl[index] = be32_to_cpu(arr[index]);
4263 }
4264
4265 arr = of_get_property(pdev->dev.of_node, "qcom,mdss-ppb-cfg-off", &len);
4266 if (arr) {
4267 mdata->nppb_cfg = len / sizeof(u32);
4268 mdata->ppb_cfg = devm_kzalloc(&mdata->pdev->dev,
4269 sizeof(u32) * mdata->nppb_cfg, GFP_KERNEL);
4270
4271 if (mdata->ppb_cfg == NULL)
4272 return -ENOMEM;
4273
4274 for (index = 0; index < mdata->nppb_cfg; index++)
4275 mdata->ppb_cfg[index] = be32_to_cpu(arr[index]);
4276 }
4277 return 0;
4278}
4279
#ifdef CONFIG_QCOM_BUS_SCALING
/*
 * mdss_mdp_parse_dt_bus_scale() - parse bus-scaling tables from device tree
 * @pdev: MDP platform device
 *
 * Reads the number of AXI data paths, the non-realtime path count, the main
 * MDP bus-scale table and the optional register-bus / hw-rt-bus tables.
 * Missing optional tables are logged but are not fatal.
 */
static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
{
	int rc, paths;
	struct device_node *node;
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,msm-bus,num-paths", &paths);
	if (rc) {
		pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n",
			rc);
		return rc;
	}
	mdss_res->axi_port_cnt = paths;

	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,mdss-num-nrt-paths", &mdata->nrt_axi_port_cnt);
	/* The NRT path count is mandatory only with fixed QoS arbitration. */
	if (rc && mdata->has_fixed_qos_arbiter_enabled) {
		pr_err("Error. qcom,mdss-num-nrt-paths prop not found.rc=%d\n",
			rc);
		return rc;
	}
	rc = 0;

	mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (IS_ERR_OR_NULL(mdata->bus_scale_table)) {
		rc = PTR_ERR(mdata->bus_scale_table);
		/* PTR_ERR(NULL) is 0; substitute a real error code. */
		if (!rc)
			rc = -EINVAL;
		pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc);
		mdata->bus_scale_table = NULL;
		return rc;
	}

	/*
	 * if mdss-reg-bus is not found then default table is picked
	 * hence below code wont return error.
	 */
	node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-reg-bus");
	if (node) {
		mdata->reg_bus_scale_table =
			msm_bus_pdata_from_node(pdev, node);
		if (IS_ERR_OR_NULL(mdata->reg_bus_scale_table)) {
			rc = PTR_ERR(mdata->reg_bus_scale_table);
			/* rc==0 means a bare NULL return (no error code). */
			if (!rc)
				pr_err("bus_pdata reg_bus failed rc=%d\n", rc);
			rc = 0;
			mdata->reg_bus_scale_table = NULL;
		}
	} else {
		rc = 0;
		mdata->reg_bus_scale_table = NULL;
		pr_debug("mdss-reg-bus not found\n");
	}

	node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-hw-rt-bus");
	if (node) {
		mdata->hw_rt_bus_scale_table =
			msm_bus_pdata_from_node(pdev, node);
		if (IS_ERR_OR_NULL(mdata->hw_rt_bus_scale_table)) {
			rc = PTR_ERR(mdata->hw_rt_bus_scale_table);
			/* rc==0 means a bare NULL return (no error code). */
			if (!rc)
				pr_err("hw_rt_bus_scale failed rc=%d\n", rc);
			rc = 0;
			mdata->hw_rt_bus_scale_table = NULL;
		}
	} else {
		rc = 0;
		mdata->hw_rt_bus_scale_table = NULL;
		pr_debug("mdss-hw-rt-bus not found\n");
	}

	return rc;
}
#else
/* Stub when bus scaling is not compiled in; there is nothing to parse. */
__maybe_unused
static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
{
	return 0;
}

#endif
4363
4364static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
4365 char *prop_name, u32 *offsets, int len)
4366{
4367 int rc;
4368
4369 rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
4370 offsets, len);
4371 if (rc) {
4372 pr_err("Error from prop %s : u32 array read\n", prop_name);
4373 return -EINVAL;
4374 }
4375
4376 return 0;
4377}
4378
4379static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
4380 char *prop_name)
4381{
4382 int len = 0;
4383
4384 of_find_property(pdev->dev.of_node, prop_name, &len);
4385
4386 if (len < 1) {
4387 pr_debug("prop %s : doesn't exist in device tree\n",
4388 prop_name);
4389 return 0;
4390 }
4391
4392 len = len/sizeof(u32);
4393
4394 return len;
4395}
4396
/* Accessor for the global MDP driver data (mdss_res), set up at probe. */
struct mdss_data_type *mdss_mdp_get_mdata(void)
{
	return mdss_res;
}
4401
4402void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable)
4403{
4404 int ret;
4405
4406 if (!mdata->batfet_required)
4407 return;
4408
4409 if (!mdata->batfet) {
4410 if (enable) {
4411 mdata->batfet = devm_regulator_get(&mdata->pdev->dev,
4412 "batfet");
4413 if (IS_ERR_OR_NULL(mdata->batfet)) {
4414 pr_debug("unable to get batfet reg. rc=%d\n",
4415 PTR_RET(mdata->batfet));
4416 mdata->batfet = NULL;
4417 return;
4418 }
4419 } else {
4420 pr_debug("Batfet regulator disable w/o enable\n");
4421 return;
4422 }
4423 }
4424
4425 if (enable) {
4426 ret = regulator_enable(mdata->batfet);
4427 if (ret)
4428 pr_err("regulator_enable failed\n");
4429 } else {
4430 regulator_disable(mdata->batfet);
4431 }
4432}
4433
4434/**
4435 * mdss_is_ready() - checks if mdss is probed and ready
4436 *
4437 * Checks if mdss resources have been initialized
4438 *
4439 * returns true if mdss is ready, else returns false
4440 */
4441bool mdss_is_ready(void)
4442{
4443 return mdss_mdp_get_mdata() ? true : false;
4444}
4445EXPORT_SYMBOL(mdss_mdp_get_mdata);
4446
4447/**
4448 * mdss_panel_intf_type() - checks if a given intf type is primary
4449 * @intf_val: panel interface type of the individual controller
4450 *
4451 * Individual controller queries with MDP to check if it is
4452 * configured as the primary interface.
4453 *
4454 * returns a pointer to the configured structure mdss_panel_cfg
4455 * to the controller that's configured as the primary panel interface.
4456 * returns NULL on error or if @intf_val is not the configured
4457 * controller.
4458 */
4459struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val)
4460{
4461 if (!mdss_res || !mdss_res->pan_cfg.init_done)
4462 return ERR_PTR(-EPROBE_DEFER);
4463
4464 if (mdss_res->pan_cfg.pan_intf == intf_val)
4465 return &mdss_res->pan_cfg;
4466 else
4467 return NULL;
4468}
4469EXPORT_SYMBOL(mdss_panel_intf_type);
4470
4471struct irq_info *mdss_intr_line()
4472{
4473 return mdss_mdp_hw.irq_info;
4474}
4475EXPORT_SYMBOL(mdss_intr_line);
4476
/*
 * mdss_mdp_wait_for_xin_halt() - poll VBIF until a client reports halted
 * @xin_id: VBIF client (xin) index whose halt status is awaited
 * @is_vbif_nrt: true to poll the non-realtime VBIF block, false for realtime
 *
 * Polls the client's bit in MMSS_VBIF_XIN_HALT_CTRL1 every 1000us until it
 * is set or XIN_HALT_TIMEOUT_US elapses; on timeout, dumps debug state via
 * MDSS_XLOG_TOUT_HANDLER. Returns 0 on success or -ETIMEDOUT.
 */
int mdss_mdp_wait_for_xin_halt(u32 xin_id, bool is_vbif_nrt)
{
	void __iomem *vbif_base;
	u32 status;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 idle_mask = BIT(xin_id);
	int rc;

	/* RT and NRT clients live on separate VBIF register blocks. */
	vbif_base = is_vbif_nrt ? mdata->vbif_nrt_io.base :
			mdata->vbif_io.base;

	rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
		status, (status & idle_mask),
		1000, XIN_HALT_TIMEOUT_US);
	if (rc == -ETIMEDOUT) {
		pr_err("VBIF client %d not halting. TIMEDOUT.\n",
			xin_id);
		MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
			"dbg_bus", "vbif_dbg_bus", "panic");
	} else {
		pr_debug("VBIF client %d is halted\n", xin_id);
	}

	return rc;
}
4502
4503/**
4504 * force_on_xin_clk() - enable/disable the force-on for the pipe clock
4505 * @bit_off: offset of the bit to enable/disable the force-on.
4506 * @reg_off: register offset for the clock control.
4507 * @enable: boolean to indicate if the force-on of the clock needs to be
4508 * enabled or disabled.
4509 *
4510 * This function returns:
4511 * true - if the clock is forced-on by this function
4512 * false - if the clock was already forced on
4513 * It is the caller responsibility to check if this function is forcing
4514 * the clock on; if so, it will need to remove the force of the clock,
4515 * otherwise it should avoid to remove the force-on.
4516 * Clocks must be on when calling this function.
4517 */
4518bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
4519{
4520 u32 val;
4521 u32 force_on_mask;
4522 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4523 bool clk_forced_on = false;
4524
4525 force_on_mask = BIT(bit_off);
4526 val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
4527
4528 clk_forced_on = !(force_on_mask & val);
4529
4530 if (true == enable)
4531 val |= force_on_mask;
4532 else
4533 val &= ~force_on_mask;
4534
4535 writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
4536
4537 return clk_forced_on;
4538}
4539
4540static void apply_dynamic_ot_limit(u32 *ot_lim,
4541 struct mdss_mdp_set_ot_params *params)
4542{
4543 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4544 u32 res, read_vbif_ot;
4545 u32 rot_ot = 4;
4546
4547 if (false == test_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map))
4548 return;
4549
4550 /* Dynamic OT setting done only for rotator and WFD */
4551 if (!((params->is_rot && params->is_yuv) || params->is_wb))
4552 return;
4553
4554 res = params->width * params->height;
4555
4556 pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d fps:%d\n",
4557 params->width, params->height, params->is_rot,
4558 params->is_yuv, params->is_wb, res, params->frame_rate);
4559
4560 switch (mdata->mdp_rev) {
4561 case MDSS_MDP_HW_REV_114:
4562 /*
4563 * MDP rev is same for msm8937 and msm8940, but rotator OT
4564 * recommendations are different. Setting it based on AXI OT.
4565 */
4566 read_vbif_ot = MDSS_VBIF_READ(mdata, MMSS_VBIF_OUT_RD_LIM_CONF0,
4567 false);
4568 rot_ot = (read_vbif_ot == 0x10) ? 4 : 8;
4569 /* fall-through */
4570 case MDSS_MDP_HW_REV_115:
4571 case MDSS_MDP_HW_REV_116:
4572 if ((res <= RES_1080p) && (params->frame_rate <= 30))
4573 *ot_lim = 2;
4574 else if (params->is_rot && params->is_yuv)
4575 *ot_lim = rot_ot;
4576 else
4577 *ot_lim = 6;
4578 break;
4579 default:
4580 if (res <= RES_1080p) {
4581 *ot_lim = 2;
4582 } else if (res <= RES_UHD) {
4583 if (params->is_rot && params->is_yuv)
4584 *ot_lim = 8;
4585 else
4586 *ot_lim = 16;
4587 }
4588 break;
4589 }
4590}
4591
4592static u32 get_ot_limit(u32 reg_off, u32 bit_off,
4593 struct mdss_mdp_set_ot_params *params)
4594{
4595 struct mdss_data_type *mdata = mdss_mdp_get_mdata();
4596 u32 ot_lim = 0;
4597 u32 is_vbif_nrt, val;
4598
4599 if (mdata->default_ot_wr_limit &&
4600 (params->reg_off_vbif_lim_conf == MMSS_VBIF_WR_LIM_CONF))
4601 ot_lim = mdata->default_ot_wr_limit;
4602 else if (mdata->default_ot_rd_limit &&
4603 (params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF))
4604 ot_lim = mdata->default_ot_rd_limit;
4605
4606 /*
4607 * If default ot is not set from dt,
4608 * then do not configure it.
4609 */
4610 if (ot_lim == 0)
4611 goto exit;
4612
4613 /* Modify the limits if the target and the use case requires it */
4614 apply_dynamic_ot_limit(&ot_lim, params);
4615
4616 is_vbif_nrt = params->is_vbif_nrt;
4617 val = MDSS_VBIF_READ(mdata, reg_off, is_vbif_nrt);
4618 val &= (0xFF << bit_off);
4619 val = val >> bit_off;
4620
4621 if (val == ot_lim)
4622 ot_lim = 0;
4623
4624exit:
4625 pr_debug("ot_lim=%d\n", ot_lim);
4626 return ot_lim;
4627}
4628
/*
 * mdss_mdp_set_ot_limit() - program a VBIF outstanding-transaction limit
 * @params: xin client id, register/bit offsets and use-case information
 *
 * Computes the OT limit for the client; if it differs from the programmed
 * value, forces the client clock on, writes the new limit, halts the client
 * until the VBIF drains, then releases the halt and the clock force.
 */
void mdss_mdp_set_ot_limit(struct mdss_mdp_set_ot_params *params)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 ot_lim;
	/* Four xin clients per limit register, one byte per client. */
	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
		params->reg_off_vbif_lim_conf;
	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
	bool is_vbif_nrt = params->is_vbif_nrt;
	u32 reg_val;
	bool forced_on;

	ot_lim = get_ot_limit(
		reg_off_vbif_lim_conf,
		bit_off_vbif_lim_conf,
		params) & 0xFF;

	/* Zero means "unconfigured" or "already programmed": nothing to do. */
	if (ot_lim == 0)
		goto exit;

	trace_mdp_perf_set_ot(params->num, params->xin_id, ot_lim,
		is_vbif_nrt);

	mutex_lock(&mdata->reg_lock);

	/* The client clock must run while its VBIF registers are written. */
	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
		params->reg_off_mdp_clk_ctrl, true);

	reg_val = MDSS_VBIF_READ(mdata, reg_off_vbif_lim_conf,
		is_vbif_nrt);
	reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
	reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
	MDSS_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val,
		is_vbif_nrt);

	/* Halt the client so the new limit takes effect cleanly. */
	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		is_vbif_nrt);
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val | BIT(params->xin_id), is_vbif_nrt);

	/* Drop the lock while polling; the halt wait may take a while. */
	mutex_unlock(&mdata->reg_lock);
	mdss_mdp_wait_for_xin_halt(params->xin_id, is_vbif_nrt);
	mutex_lock(&mdata->reg_lock);

	reg_val = MDSS_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		is_vbif_nrt);
	MDSS_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
		reg_val & ~BIT(params->xin_id), is_vbif_nrt);

	/* Undo the clock force only if this function applied it. */
	if (forced_on)
		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
			params->reg_off_mdp_clk_ctrl, false);

	mutex_unlock(&mdata->reg_lock);

exit:
	return;
}
4686
/* RPM "misc" resource type and the SVS-plus vote key within it. */
#define RPM_MISC_REQ_TYPE 0x6373696d
#define RPM_MISC_REQ_SVS_PLUS_KEY 0x2B737673

/*
 * mdss_mdp_config_cx_voltage() - vote/unvote the SVS-high corner with RPM
 * @mdata: MDP driver data
 * @enable: non-zero to request SVS high, zero to drop the request
 *
 * Sends the vote to both the active and the sleep RPM sets. No-op unless
 * the target flagged en_svs_high. rpm_kvp/svs_en are static because the
 * kvp's data pointer references svs_en across calls.
 */
static void mdss_mdp_config_cx_voltage(struct mdss_data_type *mdata, int enable)
{
	int ret = 0;
	static struct msm_rpm_kvp rpm_kvp;
	static uint8_t svs_en;

	if (!mdata->en_svs_high)
		return;

	/* One-time init of key/length; data is (re)assigned per request. */
	if (!rpm_kvp.key) {
		rpm_kvp.key = RPM_MISC_REQ_SVS_PLUS_KEY;
		rpm_kvp.length = sizeof(uint64_t);
		pr_debug("%s: Initialized rpm_kvp structure\n", __func__);
	}

	if (enable) {
		svs_en = 1;
		rpm_kvp.data = &svs_en;
		pr_debug("%s: voting for svs high\n", __func__);
		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("vote for active_set svs high failed: %d\n",
					ret);
		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("vote for sleep_set svs high failed: %d\n",
					ret);
	} else {
		svs_en = 0;
		rpm_kvp.data = &svs_en;
		pr_debug("%s: Removing vote for svs high\n", __func__);
		ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("Remove vote:active_set svs high failed: %d\n",
					ret);
		ret = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET,
					RPM_MISC_REQ_TYPE, 0,
					&rpm_kvp, 1);
		if (ret)
			pr_err("Remove vote:sleep_set svs high failed: %d\n",
					ret);
	}
}
4739
/*
 * mdss_mdp_cx_ctrl() - vote/unvote the CX power rail for MDSS
 * @mdata: MDP driver data
 * @enable: non-zero to enable the rail, zero to disable it
 *
 * Raises the voltage corner to SVS_SOC before enabling and drops it to
 * CORNER_NONE after disabling. No-op when no vdd-cx supply is configured.
 * Returns 0 on success or a negative regulator error code.
 */
static int mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable)
{
	int rc = 0;

	if (!mdata->vdd_cx)
		return rc;

	if (enable) {
		/* Raise the corner vote before turning the rail on. */
		rc = regulator_set_voltage(
			mdata->vdd_cx,
			RPM_REGULATOR_CORNER_SVS_SOC,
			RPM_REGULATOR_CORNER_SUPER_TURBO);
		if (rc < 0)
			goto vreg_set_voltage_fail;

		pr_debug("Enabling CX power rail\n");
		rc = regulator_enable(mdata->vdd_cx);
		if (rc) {
			pr_err("Failed to enable regulator.\n");
			return rc;
		}
	} else {
		pr_debug("Disabling CX power rail\n");
		rc = regulator_disable(mdata->vdd_cx);
		if (rc) {
			pr_err("Failed to disable regulator.\n");
			return rc;
		}
		/* Release the corner vote once the rail is off. */
		rc = regulator_set_voltage(
			mdata->vdd_cx,
			RPM_REGULATOR_CORNER_NONE,
			RPM_REGULATOR_CORNER_SUPER_TURBO);
		if (rc < 0)
			goto vreg_set_voltage_fail;
	}

	return rc;

vreg_set_voltage_fail:
	pr_err("Set vltg fail\n");
	return rc;
}
4782
/**
 * mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
 * @mdata: MDP private data
 * @on: 1 to turn on footswitch, 0 to turn off footswitch
 *
 * When no active references to the MDP device node and its child nodes are
 * held, MDSS GDSC can be turned off. However, if any panels are still
 * active (but likely in an idle state), the vote for the CX and the batfet
 * rails should not be released.
 */
static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
{
	int ret;
	int active_cnt = 0;

	if (!mdata->fs)
		return;

	MDSS_XLOG(on, mdata->fs_ena, mdata->idle_pc, mdata->en_svs_high,
		atomic_read(&mdata->active_intf_cnt));

	if (on) {
		if (!mdata->fs_ena) {
			pr_debug("Enable MDP FS\n");
			if (mdata->venus) {
				ret = regulator_enable(mdata->venus);
				if (ret)
					pr_err("venus failed to enable\n");
			}

			ret = regulator_enable(mdata->fs);
			if (ret)
				pr_warn("Footswitch failed to enable\n");
			/* CX/batfet votes were kept across idle power
			 * collapse, so only re-vote on a full power-up.
			 */
			if (!mdata->idle_pc) {
				mdss_mdp_cx_ctrl(mdata, true);
				mdss_mdp_batfet_ctrl(mdata, true);
			}
		}
		if (mdata->en_svs_high)
			mdss_mdp_config_cx_voltage(mdata, true);
		mdata->fs_ena = true;
	} else {
		if (mdata->fs_ena) {
			pr_debug("Disable MDP FS\n");
			active_cnt = atomic_read(&mdata->active_intf_cnt);
			if (active_cnt != 0) {
				/*
				 * Turning off GDSC while overlays are still
				 * active.
				 */
				mdata->idle_pc = true;
				pr_debug("idle pc. active overlays=%d\n",
					active_cnt);
				mdss_mdp_memory_retention_enter();
			} else {
				/* Full power-down: release the rail votes. */
				mdss_mdp_cx_ctrl(mdata, false);
				mdss_mdp_batfet_ctrl(mdata, false);
			}
			if (mdata->en_svs_high)
				mdss_mdp_config_cx_voltage(mdata, false);
			regulator_disable(mdata->fs);
			if (mdata->venus)
				regulator_disable(mdata->venus);
		}
		mdata->fs_ena = false;
	}
}
4850
/*
 * mdss_mdp_secure_display_ctrl() - enter/exit secure display via TrustZone
 * @mdata: MDP driver data (holds secure-display client accounting)
 * @enable: 1 to enter secure display, 0 to exit
 *
 * Reference-counts secure-display clients: only the first enable and the
 * last disable issue the MEM_PROTECT_SD_CTRL SCM call; intermediate
 * transitions just update the count. Returns a negative errno on SCM
 * failure, otherwise the TrustZone response value (0 on success).
 */
int mdss_mdp_secure_display_ctrl(struct mdss_data_type *mdata,
	unsigned int enable)
{
	struct sd_ctrl_req {
		unsigned int enable;
	} __attribute__ ((__packed__)) request;
	unsigned int resp = -1;
	int ret = 0;
	struct scm_desc desc;

	/* Not the first enable / not the last disable: count only. */
	if ((enable && (mdss_get_sd_client_cnt() > 0)) ||
		(!enable && (mdss_get_sd_client_cnt() > 1))) {
		mdss_update_sd_client(mdata, enable);
		return ret;
	}

	desc.args[0] = request.enable = enable;
	desc.arginfo = SCM_ARGS(1);

	/* Legacy and ARMv8 SCM conventions use different call shapes. */
	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
			&request, sizeof(request), &resp, sizeof(resp));
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
			mem_protect_sd_ctrl_id), &desc);
		resp = desc.ret[0];
	}

	pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
		enable, ret, resp);
	if (ret)
		return ret;

	mdss_update_sd_client(mdata, enable);
	return resp;
}
4887
/* Common suspend path: remember the footswitch state, then drop it. */
static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
{
	/* Saved so resume can restore the pre-suspend footswitch state. */
	mdata->suspend_fs_ena = mdata->fs_ena;
	mdss_mdp_footswitch_ctrl(mdata, false);

	pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);

	return 0;
}
4897
/* Common resume path: restore the footswitch if it was on before suspend. */
static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
{
	if (mdata->suspend_fs_ena)
		mdss_mdp_footswitch_ctrl(mdata, true);

	pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);

	return 0;
}
4907
#ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback: delegates to the common suspend path. */
static int mdss_mdp_pm_suspend(struct device *dev)
{
	struct mdss_data_type *mdata;

	mdata = dev_get_drvdata(dev);
	if (!mdata)
		return -ENODEV;

	dev_dbg(dev, "display pm suspend\n");

	return mdss_mdp_suspend_sub(mdata);
}

/* System-sleep resume callback: resets runtime-PM state, then resumes. */
static int mdss_mdp_pm_resume(struct device *dev)
{
	struct mdss_data_type *mdata;

	mdata = dev_get_drvdata(dev);
	if (!mdata)
		return -ENODEV;

	dev_dbg(dev, "display pm resume\n");

	/*
	 * It is possible that the runtime status of the mdp device may
	 * have been active when the system was suspended. Reset the runtime
	 * status to suspended state after a complete system resume.
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	return mdss_mdp_resume_sub(mdata);
}
#endif
4944
#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
/* Legacy platform-bus suspend, used only when CONFIG_PM_SLEEP is off. */
static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	if (!mdata)
		return -ENODEV;

	dev_dbg(&pdev->dev, "display suspend\n");

	return mdss_mdp_suspend_sub(mdata);
}

/* Legacy platform-bus resume, used only when CONFIG_PM_SLEEP is off. */
static int mdss_mdp_resume(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	if (!mdata)
		return -ENODEV;

	dev_dbg(&pdev->dev, "display resume\n");

	return mdss_mdp_resume_sub(mdata);
}
#else
/* With CONFIG_PM_SLEEP, dev_pm_ops handles suspend/resume instead. */
#define mdss_mdp_suspend NULL
#define mdss_mdp_resume NULL
#endif
4973
#ifdef CONFIG_PM
/* Runtime-PM resume: power the footswitch back on. */
static int mdss_mdp_runtime_resume(struct device *dev)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	bool device_on = true;

	if (!mdata)
		return -ENODEV;

	dev_dbg(dev, "pm_runtime: resuming. active overlay cnt=%d\n",
		atomic_read(&mdata->active_intf_cnt));

	/* do not resume panels when coming out of idle power collapse */
	if (!mdata->idle_pc)
		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
	mdss_mdp_footswitch_ctrl(mdata, true);

	return 0;
}

/* Runtime-PM idle: nothing to do beyond logging. */
static int mdss_mdp_runtime_idle(struct device *dev)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);

	if (!mdata)
		return -ENODEV;

	dev_dbg(dev, "pm_runtime: idling...\n");

	return 0;
}

/* Runtime-PM suspend: refuse while clocks are on, else drop footswitch. */
static int mdss_mdp_runtime_suspend(struct device *dev)
{
	struct mdss_data_type *mdata = dev_get_drvdata(dev);
	bool device_on = false;

	if (!mdata)
		return -ENODEV;
	dev_dbg(dev, "pm_runtime: suspending. active overlay cnt=%d\n",
		atomic_read(&mdata->active_intf_cnt));

	/* MDP clocks still running means the device is busy. */
	if (mdata->clk_ena) {
		pr_err("MDP suspend failed\n");
		return -EBUSY;
	}

	mdss_mdp_footswitch_ctrl(mdata, false);
	/* do not suspend panels when going in to idle power collapse */
	if (!mdata->idle_pc)
		device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);

	return 0;
}
#endif
5029
/* Device PM callbacks: system-sleep always, runtime PM when configured. */
static const struct dev_pm_ops mdss_mdp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
#ifdef CONFIG_PM
	SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend,
			mdss_mdp_runtime_resume,
			mdss_mdp_runtime_idle)
#endif
};
5038
/* Driver remove: tear down PM, PP, bus-scale, debugfs and notifiers. */
static int mdss_mdp_remove(struct platform_device *pdev)
{
	struct mdss_data_type *mdata = platform_get_drvdata(pdev);

	if (!mdata)
		return -ENODEV;
	pm_runtime_disable(&pdev->dev);
	mdss_mdp_pp_term(&pdev->dev);
	mdss_mdp_bus_scale_unregister(mdata);
	mdss_debugfs_remove(mdata);
	/* Only unregister the GDSC notifier if probe registered one. */
	if (mdata->regulator_notif_register)
		regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb));
	return 0;
}
5053
/* Device-tree match table: binds this driver to "qcom,mdss_mdp" nodes. */
static const struct of_device_id mdss_mdp_dt_match[] = {
	{ .compatible = "qcom,mdss_mdp",},
	{}
};
MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match);
5059
/* Platform driver definition; legacy suspend/resume are NULL under PM_SLEEP. */
static struct platform_driver mdss_mdp_driver = {
	.probe = mdss_mdp_probe,
	.remove = mdss_mdp_remove,
	.suspend = mdss_mdp_suspend,
	.resume = mdss_mdp_resume,
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.of_match_table = mdss_mdp_dt_match,
		.pm = &mdss_mdp_pm_ops,
	},
};
5076
/* Register the MDP platform driver; returns platform core's result. */
static int mdss_mdp_register_driver(void)
{
	return platform_driver_register(&mdss_mdp_driver);
}
5081
5082static int __init mdss_mdp_driver_init(void)
5083{
5084 int ret;
5085
5086 ret = mdss_mdp_register_driver();
5087 if (ret) {
5088 pr_err("mdp_register_driver() failed!\n");
5089 return ret;
5090 }
5091
5092 return 0;
5093
5094}
5095
/* Boot/module parameter carrying the bootloader's panel selection. */
module_param_string(panel, mdss_mdp_panel, MDSS_MAX_PANEL_LEN, 0600);
/*
 * panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>
 * where <lk_cfg> is "1"-lk/gcdb config or "0" non-lk/non-gcdb
 * config; <pan_intf> is dsi:<ctrl_id> or hdmi or edp
 * <pan_intf_cfg> is panel interface specific string
 * Ex: This string is panel's device node name from DT
 * for DSI interface
 * hdmi/edp interface does not use this string
 * <panel_topology_cfg> is an optional string. Currently it is
 * only valid for DSI panels. In dual-DSI case, it needs to be
 * used on both panels or none. When used, format is config%d
 * where %d is one of the configuration found in device node of
 * panel selected by <pan_intf_cfg>
 */
/*
 * NOTE(review): MODULE_PARM_DESC is emitted twice for "panel" below;
 * confirm the duplicate description is intended.
 */
MODULE_PARM_DESC(panel, "lk supplied panel selection string");
MODULE_PARM_DESC(panel,
	"panel=<lk_cfg>:<pan_intf>:<pan_intf_cfg>:<panel_topology_cfg>");
module_init(mdss_mdp_driver_init);